Mirror of https://github.com/prometheus-community/windows_exporter.git, synced 2026-02-08 14:06:38 +00:00
Compare commits
124 Commits
SHA1 hashes of the compared commits:

af250824f7, 7f57491fac, 890fdc2996, d1a807840c, 74d7332b47, 22d4f50c83, 6dad58fc8f, 8231bc4395, baba51bc6a, b64ccbe683,
21a02c4fbe, 089bc3b2d4, 285a165eba, 90b197450e, 0865061210, 2e50f515d8, 8be7dc7e83, 0d4f747f8f, de285e1043, 7fde426e88,
fa12d1476f, 92d0a1d8f0, 2f46a088de, 1cc4df2bd7, feb2b18e6a, 012b938b54, a0e5baa171, 7611e33bc7, 2aafa9ebf3, f9f27b0b97,
18128f48f5, 2688847c2e, 1c605adb5e, d0877d0dc0, 2cd630fb2f, b210986181, 375a74f1e8, abd5a53045, aa394d1d8e, bdcc7b0913,
d7a908e6c0, c23a98ae90, f8a7c99092, 29b020999d, 2f0a57898f, 1ad20d6eb8, de000b74c8, d860d92dc8, 3a19fe4e7d, 26a468f17a,
a6f3b33928, 8ef215cc7e, 2c155a12bd, e1141c3ec0, b635ecc6c1, a7b5cf7aa6, 719ccd4f7f, 7ab8c7dde4, eb002eb667, a1638cdf4c,
091406877a, 84970ac086, d86f318010, 853d615673, cd9a740e2b, c70e7674a5, d3e3835c29, 592c8a8d69, 6f6a479535, d01c66986c,
823ffb7597, a90f9cda0f, 31d4c28124, e880889f07, a283608812, 8251ddd176, 4f0a3a89ab, 27cc1072fe, eb9cf56dee, 3c20887433,
37d1c4e958, 33b6e17b2d, 1a9d4afdd6, 9e198c55a4, b309a05bde, 123a055242, 9308108284, 0ecf3cd792, 801444b35b, f4ab322e5b,
72de199528, 304972580d, 6322bb124f, cb6a91b705, 4d9fb1be72, 27e26037e3, e09497116f, 3099e10555, 3900504504, 2c5e30d920,
b348c245e8, 578bcc4959, 31a30474f1, ce1005add8, 6107a59306, 47656b16bd, 8fc47669be, 1a67ca54b6, c73f52338d, c5f23b4e64,
411954cf9d, 56be7c63d5, 6ffe504f7e, daa6f3d111, 85fdfb44b8, 33879449a2, 462a136673, d5e39892cf, ec0d863c29, afc3655a41,
e25e96a62e, 23d92cfcae, 1258703f23, 8841091f9c
.gitignore (vendored, 2 changes)
@@ -3,3 +3,5 @@ VERSION
 *.swp
 *.un~
 output/
+.vscode
+.idea
.golangci.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
linters:
  disable-all: true
  enable:
  - deadcode
  - errcheck
  - golint
  - govet
  - gofmt
  - ineffassign
  - interfacer
  - structcheck
  - unconvert
  - varcheck

issues:
  exclude:
    - don't use underscores in Go names
    - exported type .+ should have comment or be unexported
  exclude-rules:
    - # Golint has many capitalisation complaints on WMI class names
      text: "`?\\w+`? should be `?\\w+`?"
      linters:
      - golint
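The exclude-rules entry above exists because the collectors map WMI classes onto Go structs whose names copy the WMI naming verbatim, which golint flags for both underscores and capitalisation. The snippet below is purely illustrative (it repeats, in isolation, the kind of declaration that collector/cpu.go makes later in this changeset); it is not code to be added anywhere.

```go
// Structs like this one keep the WMI class name so the wmi package can map
// query results onto them by field name; golint would report "don't use
// underscores in Go names" and similar messages, which the .golangci.yaml
// exclusions silence for the whole package.
package collector

type Win32_PerfRawData_PerfOS_Processor struct {
	Name            string
	PercentIdleTime uint64
}
```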
.promu.yml (10 changes)
@@ -4,11 +4,11 @@ build:
   binaries:
     - name: wmi_exporter
   ldflags: |
-    -X {{repoPath}}/vendor/github.com/prometheus/common/version.Version={{.Version}}
-    -X {{repoPath}}/vendor/github.com/prometheus/common/version.Revision={{.Revision}}
-    -X {{repoPath}}/vendor/github.com/prometheus/common/version.Branch={{.Branch}}
-    -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
-    -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
+    -X github.com/prometheus/common/version.Version={{.Version}}
+    -X github.com/prometheus/common/version.Revision={{.Revision}}
+    -X github.com/prometheus/common/version.Branch={{.Branch}}
+    -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
+    -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
 tarball:
   files:
     - LICENSE
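The only change here is dropping the `{{repoPath}}/vendor/` prefix from the `-X` targets: with Go modules the prometheus/common/version package is addressed by its canonical import path, so the linker writes the build metadata into the non-vendored package. A minimal sketch (not part of the changeset) of how those linker-set variables surface at runtime:

```go
// Sketch only: shows where the -X flags from .promu.yml land once the code
// stops using a vendor/ copy of prometheus/common/version.
package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

func main() {
	// Populated at build time via:
	//   go build -ldflags "-X github.com/prometheus/common/version.Version=0.9.0 ..."
	fmt.Println(version.Print("wmi_exporter"))
	fmt.Println("build context:", version.BuildContext())
}
```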
Gopkg.lock (generated, 175 changes)
@@ -1,175 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3ccf8ba7afe02fd470c4f07d6eea4d0e6875da3d129f95b925f2003ce5dd2024"
|
||||
name = "github.com/StackExchange/wmi"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338"
|
||||
version = "1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:f3793f8a708522400cef1dba23385e901aede5519f68971fd69938ef330b07a1"
|
||||
name = "github.com/alecthomas/template"
|
||||
packages = [
|
||||
".",
|
||||
"parse",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fdd419e104ec26bb5bd63cc62637c640453ed2929a7453f3afadbd9a0223da66"
|
||||
name = "github.com/alecthomas/units"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:cb0535f5823b47df7dcb9768ebb6c000b79ad115472910c70efe93c9ed9b2315"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
pruneopts = "NUT"
|
||||
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f9adc21a937e5da643ea14a3488cb7506788876737a5e205394e508627a6eec8"
|
||||
name = "github.com/dimchansky/utfbom"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:cb4e216bd9f58866f42dc65893455b24f879b026fdaa1ecc3aafff625fdb5a66"
|
||||
name = "github.com/go-ole/go-ole"
|
||||
packages = [
|
||||
".",
|
||||
"oleutil",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9f35c1344b56e5868d511d231f215edd0650aa572664f856444affdd256e43e4"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
pruneopts = "NUT"
|
||||
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
pruneopts = "NUT"
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/promhttp",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:32d10bdfa8f09ecf13598324dba86ab891f11db3c538b6a34d1c3b5b99d7c36b"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
pruneopts = "NUT"
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:ce98e83b2b9486b6a9ce5e44fd4097c64e8f2f0eaa6c5041a8f12d3aaa5c17b3"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"log",
|
||||
"model",
|
||||
"version",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:61a95e8d3e39e94207fba1b56d3c2182a356a1e41017aa647f523ae964b6bb0c"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6989062eb7ccf25cf38bf4fe3dba097ee209f896cda42cefdca3927047bef7b6"
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
|
||||
version = "v1.0.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
pruneopts = "NUT"
|
||||
revision = "182114d582623c1caa54f73de9c7224e23a48487"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:ea69008276e11262595a1f9a279ffd51d93e21c32c13b0f81856e962c6f607dd"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
"windows/registry",
|
||||
"windows/svc",
|
||||
"windows/svc/eventlog",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "8c0ece68c28377f4c326d85b94f8df0dace46f80"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:22b2dee6f30bc8601f087449a2a819df8388e54e9547349c658f14d8f8c590f2"
|
||||
name = "gopkg.in/alecthomas/kingpin.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
|
||||
version = "v2.2.6"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/StackExchange/wmi",
|
||||
"github.com/dimchansky/utfbom",
|
||||
"github.com/prometheus/client_golang/prometheus",
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp",
|
||||
"github.com/prometheus/client_model/go",
|
||||
"github.com/prometheus/common/expfmt",
|
||||
"github.com/prometheus/common/log",
|
||||
"github.com/prometheus/common/version",
|
||||
"golang.org/x/sys/windows/registry",
|
||||
"golang.org/x/sys/windows/svc",
|
||||
"gopkg.in/alecthomas/kingpin.v2",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
@@ -1,4 +0,0 @@
|
||||
[prune]
|
||||
non-go = true
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
Makefile (2 changes)
@@ -7,7 +7,7 @@ test:
 	go test -v ./...

 lint:
-	gometalinter --vendor --config gometalinter.config ./...
+	golangci-lint -c .golangci.yaml run

 fmt:
 	gofmt -l -w -s .
README.md (12 changes)
@@ -1,6 +1,6 @@
 # WMI exporter

-[](https://ci.appveyor.com/project/martinlindhe/wmi-exporter)
+[](https://ci.appveyor.com/project/martinlindhe/wmi-exporter)

 Prometheus exporter for Windows machines, using the WMI (Windows Management Instrumentation).

@@ -10,12 +10,15 @@ Prometheus exporter for Windows machines, using the WMI (Windows Management Inst
 Name | Description | Enabled by default
 ---------|-------------|--------------------
 [ad](docs/collector.ad.md) | Active Directory Domain Services |
+[adfs](docs/collector.adfs.md) | Active Directory Federation Services |
 [cpu](docs/collector.cpu.md) | CPU usage | ✓
 [cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | ✓
+[container](docs/collector.container.md) | Container metrics |
 [dns](docs/collector.dns.md) | DNS Server |
 [hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
 [iis](docs/collector.iis.md) | IIS sites and applications |
 [logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓
 [logon](docs/collector.logon.md) | User logon sessions |
 [memory](docs/collector.memory.md) | Memory usage metrics |
 [msmq](docs/collector.msmq.md) | MSMQ queues |
 [mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
@@ -33,6 +36,7 @@ Name | Description | Enabled by default
 [service](docs/collector.service.md) | Service state metrics | ✓
 [system](docs/collector.system.md) | System calls | ✓
 [tcp](docs/collector.tcp.md) | TCP connections |
+[thermalzone](docs/collector.thermalzone.md) | Thermal information
 [textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | ✓
 [vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |

@@ -65,6 +69,11 @@ Example service collector with a custom query.
 msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,service --% EXTRA_FLAGS="--collector.service.services-where ""Name LIKE 'sql%'"""
 ```
+
+On some older versions of Windows you may need to surround parameter values with double quotes to get the install command parsing properly:
+```powershell
+msiexec /i C:\Users\Administrator\Downloads\wmi_exporter.msi ENABLED_COLLECTORS="ad,iis,logon,memory,process,tcp,thermalzone" TEXTFILE_DIR="C:\custom_metrics\"
+```

 ## Roadmap

 See [open issues](https://github.com/martinlindhe/wmi_exporter/issues)
@@ -72,7 +81,6 @@ See [open issues](https://github.com/martinlindhe/wmi_exporter/issues)

 ## Usage

-    go get -u github.com/golang/dep
     go get -u github.com/prometheus/promu
     go get -u github.com/martinlindhe/wmi_exporter
     cd $env:GOPATH/src/github.com/martinlindhe/wmi_exporter
appveyor.yml (28 changes)
@@ -2,18 +2,27 @@ version: "{build}"

 os: Visual Studio 2017
 build: off
-stack: go 1.10
+stack: go 1.13

 environment:
   GOPATH: c:\gopath
+  GO111MODULE: on

 clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter

 install:
   - mkdir %GOPATH%\bin
   - set PATH=%GOPATH%\bin;%PATH%
-  - go get -u github.com/prometheus/promu
-  - go get -u github.com/alecthomas/gometalinter && gometalinter --install
   - set PATH=%PATH%;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin
   - choco install gitversion.portable make -y
+  - ps: |
+      appveyor DownloadFile https://github.com/golangci/golangci-lint/releases/download/v1.21.0/golangci-lint-1.21.0-windows-amd64.zip
+      Expand-Archive golangci-lint-1.21.0-windows-amd64.zip
+      Move-Item golangci-lint-1.21.0-windows-amd64\golangci-lint-1.21.0-windows-amd64\golangci-lint.exe $env:GOPATH\bin\golangci-lint.exe
+  - ps: |
+      $env:GO111MODULE="off"
+      go get -u github.com/prometheus/promu
+      $env:GO111MODULE="on"

 test_script:
   - make test
@@ -23,6 +32,10 @@ after_test:

 build_script:
   - ps: |
+      # go mod download (or, if we don't call it, go build) will write every dependent package name to
+      # stderr, which will be interpreted as an error and abort the build if ErrorActionPreference is Stop,
+      # so we need to run it before setting the preference.
+      go mod download
       $ErrorActionPreference = "Stop"
       gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
       $Version = Get-Content VERSION
@@ -39,12 +52,13 @@ after_build:
         return
       }
      $ErrorActionPreference = "Stop"
+      $BuildVersion = Get-Content VERSION
       # The MSI version is not semver compliant, so just take the numerical parts
-      $Version = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
+      $MSIVersion = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
       foreach($Arch in "amd64","386") {
-        Write-Verbose "Building wmi_exporter $Version msi for $Arch"
-        .\installer\build.ps1 -PathToExecutable .\output\$Arch\wmi_exporter-$Version-$Arch.exe -Version $Version -Arch "$Arch"
-        Move-Item installer\Output\wmi_exporter-$Version-$Arch.msi output\$Arch\
+        Write-Verbose "Building wmi_exporter $MSIVersion msi for $Arch"
+        .\installer\build.ps1 -PathToExecutable .\output\$Arch\wmi_exporter-$BuildVersion-$Arch.exe -Version $MSIVersion -Arch "$Arch"
+        Move-Item installer\Output\wmi_exporter-$MSIVersion-$Arch.msi output\$Arch\
       }
   - promu checksum output\
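The MSI version rewrite above hinges on one regular expression: it drops a leading "v" and any pre-release suffix so the installer gets a purely numeric version, while the full GitVersion output is kept in `$BuildVersion` for the executable name. A small Go sketch (a hypothetical helper, mirroring the PowerShell `-replace`) makes the transformation explicit:

```go
package main

import (
	"fmt"
	"regexp"
)

// msiVersion mirrors `-replace '^v?([0-9\.]+).*$','$1'` from appveyor.yml:
// keep only the leading dotted-numeric part of a tag name.
func msiVersion(tag string) string {
	re := regexp.MustCompile(`^v?([0-9.]+).*$`)
	return re.ReplaceAllString(tag, "$1")
}

func main() {
	fmt.Println(msiVersion("v0.9.0"))        // 0.9.0
	fmt.Println(msiVersion("v0.10.2-beta1")) // 0.10.2
}
```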
collector/ad.go
@@ -11,7 +11,7 @@ import (
 )

 func init() {
-	Factories["ad"] = NewADCollector
+	registerCollector("ad", NewADCollector)
 }

 // A ADCollector is a Prometheus collector for WMI Win32_PerfRawData_DirectoryServices_DirectoryServices metrics
@@ -454,7 +454,7 @@ func NewADCollector() (Collector, error) {

 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
-func (c *ADCollector) Collect(ch chan<- prometheus.Metric) error {
+func (c *ADCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
 	if desc, err := c.collect(ch); err != nil {
 		log.Error("failed collecting ad metrics:", desc, err)
 		return err
collector/adfs.go (new file, 188 lines)
@@ -0,0 +1,188 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("adfs", newADFSCollector)
|
||||
}
|
||||
|
||||
type adfsCollector struct {
|
||||
adLoginConnectionFailures *prometheus.Desc
|
||||
certificateAuthentications *prometheus.Desc
|
||||
deviceAuthentications *prometheus.Desc
|
||||
extranetAccountLockouts *prometheus.Desc
|
||||
federatedAuthentications *prometheus.Desc
|
||||
passportAuthentications *prometheus.Desc
|
||||
passiveRequests *prometheus.Desc
|
||||
passwordChangeFailed *prometheus.Desc
|
||||
passwordChangeSucceeded *prometheus.Desc
|
||||
tokenRequests *prometheus.Desc
|
||||
windowsIntegratedAuthentications *prometheus.Desc
|
||||
}
|
||||
|
||||
// newADFSCollector constructs a new adfsCollector
|
||||
func newADFSCollector() (Collector, error) {
|
||||
const subsystem = "adfs"
|
||||
|
||||
return &adfsCollector{
|
||||
adLoginConnectionFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "ad_login_connection_failures"),
|
||||
"Total number of connection failures to an Active Directory domain controller",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
certificateAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "certificate_authentications"),
|
||||
"Total number of User Certificate authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
deviceAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "device_authentications"),
|
||||
"Total number of Device authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
extranetAccountLockouts: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "extranet_account_lockouts"),
|
||||
"Total number of Extranet Account Lockouts",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
federatedAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "federated_authentications"),
|
||||
"Total number of authentications from a federated source",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passportAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "passport_authentications"),
|
||||
"Total number of Microsoft Passport SSO authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passiveRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "passive_requests"),
|
||||
"Total number of passive (browser-based) requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passwordChangeFailed: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "password_change_failed"),
|
||||
"Total number of failed password changes",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passwordChangeSucceeded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "password_change_succeeded"),
|
||||
"Total number of successful password changes",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
tokenRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "token_requests"),
|
||||
"Total number of token requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
windowsIntegratedAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "windows_integrated_authentications"),
|
||||
"Total number of Windows integrated authentications (Kerberos/NTLM)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type perflibADFS struct {
|
||||
AdLoginConnectionFailures float64 `perflib:"AD login Connection Failures"`
|
||||
CertificateAuthentications float64 `perflib:"Certificate Authentications"`
|
||||
DeviceAuthentications float64 `perflib:"Device Authentications"`
|
||||
ExtranetAccountLockouts float64 `perflib:"Extranet Account Lockouts"`
|
||||
FederatedAuthentications float64 `perflib:"Federated Authentications"`
|
||||
PassportAuthentications float64 `perflib:"Microsoft Passport Authentications"`
|
||||
PassiveRequests float64 `perflib:"Passive Requests"`
|
||||
PasswordChangeFailed float64 `perflib:"Password Change Failed Requests"`
|
||||
PasswordChangeSucceeded float64 `perflib:"Password Change Successful Requests"`
|
||||
TokenRequests float64 `perflib:"Token Requests"`
|
||||
WindowsIntegratedAuthentications float64 `perflib:"Windows Integrated Authentications"`
|
||||
}
|
||||
|
||||
func (c *adfsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var adfsData []perflibADFS
|
||||
err := unmarshalObject(ctx.perfObjects["AD FS"], &adfsData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.adLoginConnectionFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].AdLoginConnectionFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.certificateAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].CertificateAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.deviceAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].DeviceAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.extranetAccountLockouts,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ExtranetAccountLockouts,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.federatedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].FederatedAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passportAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PassportAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passiveRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PassiveRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeFailed,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PasswordChangeFailed,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeSucceeded,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PasswordChangeSucceeded,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.tokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].TokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.windowsIntegratedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].WindowsIntegratedAuthentications,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
collector/collector.go (new file, 106 lines)
@@ -0,0 +1,106 @@
package collector

import (
	"strconv"
	"strings"

	"github.com/leoluk/perflib_exporter/perflib"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"golang.org/x/sys/windows/registry"
)

// ...
const (
	// TODO: Make package-local
	Namespace = "wmi"

	// Conversion factors
	ticksToSecondsScaleFactor = 1 / 1e7
	windowsEpoch              = 116444736000000000
)

// getWindowsVersion reads the version number of the OS from the Registry
// See https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
func getWindowsVersion() float64 {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		log.Warn("Couldn't open registry", err)
		return 0
	}
	defer func() {
		err = k.Close()
		if err != nil {
			log.Warnf("Failed to close registry key: %v", err)
		}
	}()

	currentv, _, err := k.GetStringValue("CurrentVersion")
	if err != nil {
		log.Warn("Couldn't open registry to determine current Windows version:", err)
		return 0
	}

	currentv_flt, err := strconv.ParseFloat(currentv, 64)

	log.Debugf("Detected Windows version %f\n", currentv_flt)

	return currentv_flt
}

type collectorBuilder func() (Collector, error)

var (
	builders                = make(map[string]collectorBuilder)
	perfCounterDependencies = make(map[string]string)
)

func registerCollector(name string, builder collectorBuilder, perfCounterNames ...string) {
	builders[name] = builder
	perfIndicies := make([]string, 0, len(perfCounterNames))
	for _, cn := range perfCounterNames {
		perfIndicies = append(perfIndicies, MapCounterToIndex(cn))
	}
	perfCounterDependencies[name] = strings.Join(perfIndicies, " ")
}

func Available() []string {
	cs := make([]string, 0, len(builders))
	for c := range builders {
		cs = append(cs, c)
	}
	return cs
}
func Build(collector string) (Collector, error) {
	return builders[collector]()
}
func getPerfQuery(collectors []string) string {
	parts := make([]string, 0, len(collectors))
	for _, c := range collectors {
		if p := perfCounterDependencies[c]; p != "" {
			parts = append(parts, p)
		}
	}
	return strings.Join(parts, " ")
}

// Collector is the interface a collector has to implement.
type Collector interface {
	// Get new metrics and expose them via prometheus registry.
	Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (err error)
}

type ScrapeContext struct {
	perfObjects map[string]*perflib.PerfObject
}

// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape
func PrepareScrapeContext(collectors []string) (*ScrapeContext, error) {
	q := getPerfQuery(collectors) // TODO: Memoize
	objs, err := getPerflibSnapshot(q)
	if err != nil {
		return nil, err
	}

	return &ScrapeContext{objs}, nil
}
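collector/collector.go replaces the old `Factories` map with `registerCollector` plus a per-scrape `ScrapeContext` that carries a perflib snapshot of only the objects the enabled collectors declared. The sketch below shows what a collector looks like against this API; the collector name, metric and perflib object are made up, while `registerCollector`, `Collector`, `ScrapeContext` and `Namespace` are the definitions above.

```go
// Hypothetical collector written against the new registration API.
package collector

import "github.com/prometheus/client_golang/prometheus"

type exampleCollector struct {
	somethingTotal *prometheus.Desc
}

func init() {
	// The trailing arguments name the perflib objects this collector needs;
	// they are mapped to counter indices and fetched once per scrape by
	// PrepareScrapeContext before Collect runs.
	registerCollector("example", newExampleCollector, "System")
}

func newExampleCollector() (Collector, error) {
	return &exampleCollector{
		somethingTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "example", "something_total"),
			"A hypothetical counter.",
			nil, nil,
		),
	}, nil
}

func (c *exampleCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	// A real collector would unmarshal ctx.perfObjects["System"] here with
	// unmarshalObject, as the adfs and cpu collectors in this changeset do.
	ch <- prometheus.MustNewConstMetric(c.somethingTotal, prometheus.CounterValue, 1)
	return nil
}
```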
collector/container.go (new file, 282 lines)
@@ -0,0 +1,282 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("container", NewContainerMetricsCollector)
|
||||
}
|
||||
|
||||
// A ContainerMetricsCollector is a Prometheus collector for containers metrics
|
||||
type ContainerMetricsCollector struct {
|
||||
// Presence
|
||||
ContainerAvailable *prometheus.Desc
|
||||
|
||||
// Number of containers
|
||||
ContainersCount *prometheus.Desc
|
||||
// memory
|
||||
UsageCommitBytes *prometheus.Desc
|
||||
UsageCommitPeakBytes *prometheus.Desc
|
||||
UsagePrivateWorkingSetBytes *prometheus.Desc
|
||||
|
||||
// CPU
|
||||
RuntimeTotal *prometheus.Desc
|
||||
RuntimeUser *prometheus.Desc
|
||||
RuntimeKernel *prometheus.Desc
|
||||
|
||||
// Network
|
||||
BytesReceived *prometheus.Desc
|
||||
BytesSent *prometheus.Desc
|
||||
PacketsReceived *prometheus.Desc
|
||||
PacketsSent *prometheus.Desc
|
||||
DroppedPacketsIncoming *prometheus.Desc
|
||||
DroppedPacketsOutgoing *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewContainerMetricsCollector constructs a new ContainerMetricsCollector
|
||||
func NewContainerMetricsCollector() (Collector, error) {
|
||||
const subsystem = "container"
|
||||
return &ContainerMetricsCollector{
|
||||
ContainerAvailable: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "available"),
|
||||
"Available",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
ContainersCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "count"),
|
||||
"Number of containers",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
UsageCommitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_bytes"),
|
||||
"Memory Usage Commit Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsageCommitPeakBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_peak_bytes"),
|
||||
"Memory Usage Commit Peak Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsagePrivateWorkingSetBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_private_working_set_bytes"),
|
||||
"Memory Usage Private Working Set Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_total"),
|
||||
"Total Run time in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeUser: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_usermode"),
|
||||
"Run Time in User mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeKernel: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_kernelmode"),
|
||||
"Run time in Kernel mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
BytesReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_bytes_total"),
|
||||
"Bytes Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
BytesSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_bytes_total"),
|
||||
"Bytes Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_total"),
|
||||
"Packets Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_total"),
|
||||
"Packets Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsIncoming: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_dropped_total"),
|
||||
"Dropped Incoming Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsOutgoing: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_dropped_total"),
|
||||
"Dropped Outgoing Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// containerClose closes the container resource
|
||||
func containerClose(c hcsshim.Container) {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
|
||||
// Types Container is passed to get the containers compute systems only
|
||||
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
|
||||
if err != nil {
|
||||
log.Error("Err in Getting containers:", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
count := len(containers)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainersCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
)
|
||||
if count == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for _, containerDetails := range containers {
|
||||
containerId := containerDetails.ID
|
||||
|
||||
container, err := hcsshim.OpenContainer(containerId)
|
||||
if container != nil {
|
||||
defer containerClose(container)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("err in opening container: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
|
||||
cstats, err := container.Statistics()
|
||||
if err != nil {
|
||||
log.Error("err in fetching container Statistics: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
// HCS V1 is for docker runtime. Add the docker:// prefix on container_id
|
||||
containerId = "docker://" + containerId
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainerAvailable,
|
||||
prometheus.CounterValue,
|
||||
1,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitPeakBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitPeakBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsagePrivateWorkingSetBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsagePrivateWorkingSetBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.TotalRuntime100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeUser,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeUser100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeKernel,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeKernel100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
|
||||
if len(cstats.Network) == 0 {
|
||||
log.Info("No Network Stats for container: ", containerId)
|
||||
continue
|
||||
}
|
||||
|
||||
networkStats := cstats.Network
|
||||
|
||||
for _, networkInterface := range networkStats {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsIncoming,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsIncoming),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsOutgoing,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsOutgoing),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/cpu.go (389 changes)
@@ -5,27 +5,80 @@ package collector
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["cpu"] = NewCPUCollector
|
||||
var deps string
|
||||
// See below for 6.05 magic value
|
||||
if getWindowsVersion() > 6.05 {
|
||||
deps = "Processor Information"
|
||||
} else {
|
||||
deps = "Processor"
|
||||
}
|
||||
registerCollector("cpu", newCPUCollector, deps)
|
||||
}
|
||||
|
||||
// A CPUCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfOS_Processor metrics
|
||||
type CPUCollector struct {
|
||||
type cpuCollectorBasic struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
}
|
||||
type cpuCollectorFull struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
ClockInterruptsTotal *prometheus.Desc
|
||||
IdleBreakEventsTotal *prometheus.Desc
|
||||
ParkingStatus *prometheus.Desc
|
||||
ProcessorFrequencyMHz *prometheus.Desc
|
||||
ProcessorMaxFrequencyMHz *prometheus.Desc
|
||||
ProcessorPerformance *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewCPUCollector constructs a new CPUCollector
|
||||
func NewCPUCollector() (Collector, error) {
|
||||
// newCPUCollector constructs a new cpuCollector, appropriate for the running OS
|
||||
func newCPUCollector() (Collector, error) {
|
||||
const subsystem = "cpu"
|
||||
return &CPUCollector{
|
||||
|
||||
version := getWindowsVersion()
|
||||
// For Windows 2008 (version 6.0) or earlier we only have the "Processor"
|
||||
// class. As of Windows 2008 R2 (version 6.1) the more detailed
|
||||
// "Processor Information" set is available (although some of the counters
|
||||
// are added in later versions, so we aren't guaranteed to get all of
|
||||
// them).
|
||||
// Value 6.05 was selected to split between Windows versions.
|
||||
if version < 6.05 {
|
||||
return &cpuCollectorBasic{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
[]string{"core", "state"},
|
||||
nil,
|
||||
),
|
||||
TimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
|
||||
"Time that processor spent in different modes (idle, user, system, ...)",
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
DPCsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dpcs_total"),
|
||||
"Total number of received and serviced deferred procedure calls (DPCs)",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &cpuCollectorFull{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
@@ -38,7 +91,6 @@ func NewCPUCollector() (Collector, error) {
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
@@ -51,164 +103,273 @@ func NewCPUCollector() (Collector, error) {
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ClockInterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "clock_interrupts_total"),
|
||||
"Total number of received and serviced clock tick interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
IdleBreakEventsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "idle_break_events_total"),
|
||||
"Total number of time processor was woken from idle",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ParkingStatus: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "parking_status"),
|
||||
"Parking Status represents whether a processor is parked or not",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorFrequencyMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "core_frequency_mhz"),
|
||||
"Core frequency in megahertz",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorPerformance: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_performance"),
|
||||
"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CPUCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting cpu metrics:", desc, err)
|
||||
type perflibProcessor struct {
|
||||
Name string
|
||||
C1Transitions float64 `perflib:"C1 Transitions/sec"`
|
||||
C2Transitions float64 `perflib:"C2 Transitions/sec"`
|
||||
C3Transitions float64 `perflib:"C3 Transitions/sec"`
|
||||
DPCRate float64 `perflib:"DPC Rate"`
|
||||
DPCsQueued float64 `perflib:"DPCs Queued/sec"`
|
||||
Interrupts float64 `perflib:"Interrupts/sec"`
|
||||
PercentC2Time float64 `perflib:"% C1 Time"`
|
||||
PercentC3Time float64 `perflib:"% C2 Time"`
|
||||
PercentC1Time float64 `perflib:"% C3 Time"`
|
||||
PercentDPCTime float64 `perflib:"% DPC Time"`
|
||||
PercentIdleTime float64 `perflib:"% Idle Time"`
|
||||
PercentInterruptTime float64 `perflib:"% Interrupt Time"`
|
||||
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
|
||||
PercentProcessorTime float64 `perflib:"% Processor Time"`
|
||||
PercentUserTime float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorBasic) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessor, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfOS_Processor docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx
|
||||
type Win32_PerfRawData_PerfOS_Processor struct {
|
||||
Name string
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
DPCRate uint32
|
||||
DPCsQueuedPersec uint32
|
||||
InterruptsPersec uint32
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentUserTime uint64
|
||||
}
|
||||
|
||||
/* NOTE: This is an alternative class, but it is not as widely available. Decide which to use
|
||||
type Win32_PerfRawData_Counters_ProcessorInformation struct {
|
||||
Name string
|
||||
AverageIdleTime uint64
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
ClockInterruptsPersec uint64
|
||||
DPCRate uint64
|
||||
DPCsQueuedPersec uint64
|
||||
IdleBreakEventsPersec uint64
|
||||
InterruptsPersec uint64
|
||||
ParkingStatus uint64
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentofMaximumFrequency uint64
|
||||
PercentPerformanceLimit uint64
|
||||
PercentPriorityTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentPrivilegedUtility uint64
|
||||
PercentProcessorPerformance uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentProcessorUtility uint64
|
||||
PercentUserTime uint64
|
||||
PerformanceLimitFlags uint64
|
||||
ProcessorFrequency uint64
|
||||
ProcessorStateFlags uint64
|
||||
}*/
|
||||
|
||||
func (c *CPUCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfOS_Processor
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, data := range dst {
|
||||
if strings.Contains(data.Name, "_Total") {
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
|
||||
core := data.Name
|
||||
|
||||
// These are only available from Win32_PerfRawData_Counters_ProcessorInformation, which is only available from Win2008R2+
|
||||
/*ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequency,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.ProcessorFrequency),
|
||||
socket, core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MaximumFrequency,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentofMaximumFrequency)/100*float64(data.ProcessorFrequency),
|
||||
socket, core,
|
||||
)*/
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC1Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC1Time,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC2Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC2Time,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC3Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC3Time,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentIdleTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentIdleTime,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentInterruptTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentInterruptTime,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentDPCTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentDPCTime,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentPrivilegedTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentPrivilegedTime,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentUserTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentUserTime,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.InterruptsPersec),
|
||||
cpu.Interrupts,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.DPCsQueuedPersec),
|
||||
cpu.DPCsQueued,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibProcessorInformation struct {
|
||||
Name string
|
||||
C1TimeSeconds float64 `perflib:"% C1 Time"`
|
||||
C2TimeSeconds float64 `perflib:"% C2 Time"`
|
||||
C3TimeSeconds float64 `perflib:"% C3 Time"`
|
||||
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
|
||||
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
|
||||
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
|
||||
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
|
||||
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
|
||||
DPCTimeSeconds float64 `perflib:"% DPC Time"`
|
||||
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
|
||||
IdleTimeSeconds float64 `perflib:"% Idle Time"`
|
||||
InterruptsTotal float64 `perflib:"Interrupts/sec"`
|
||||
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
|
||||
ParkingStatus float64 `perflib:"Parking Status"`
|
||||
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
|
||||
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
|
||||
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
|
||||
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
|
||||
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
|
||||
ProcessorPerformance float64 `perflib:"% Processor Performance"`
|
||||
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
|
||||
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
|
||||
UserTimeSeconds float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessorInformation, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C1TimeSeconds,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C2TimeSeconds,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C3TimeSeconds,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleTimeSeconds,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptTimeSeconds,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCTimeSeconds,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PrivilegedTimeSeconds,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.UserTimeSeconds,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCsQueuedTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ClockInterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.ClockInterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IdleBreakEventsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleBreakEventsTotal,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ParkingStatus,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ParkingStatus,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequencyMHz,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorFrequencyMHz,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorPerformance,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorPerformance,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
collector/cs.go
@@ -11,13 +11,14 @@ import (
 )

 func init() {
-	Factories["cs"] = NewCSCollector
+	registerCollector("cs", NewCSCollector)
 }

 // A CSCollector is a Prometheus collector for WMI metrics
 type CSCollector struct {
 	PhysicalMemoryBytes *prometheus.Desc
 	LogicalProcessors   *prometheus.Desc
+	Hostname            *prometheus.Desc
 }

 // NewCSCollector ...
@@ -37,12 +38,21 @@ func NewCSCollector() (Collector, error) {
 			nil,
 			nil,
 		),
+		Hostname: prometheus.NewDesc(
+			prometheus.BuildFQName(Namespace, subsystem, "hostname"),
+			"Labeled system hostname information as provided by ComputerSystem.DNSHostName and ComputerSystem.Domain",
+			[]string{
+				"hostname",
+				"domain",
+				"fqdn"},
+			nil,
+		),
 	}, nil
 }

 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
-func (c *CSCollector) Collect(ch chan<- prometheus.Metric) error {
+func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
 	if desc, err := c.collect(ch); err != nil {
 		log.Error("failed collecting cs metrics:", desc, err)
 		return err
@@ -55,6 +65,9 @@ func (c *CSCollector) Collect(ch chan<- prometheus.Metric) error {
 type Win32_ComputerSystem struct {
 	NumberOfLogicalProcessors uint32
 	TotalPhysicalMemory      uint64
+	DNSHostname              string
+	Domain                   string
+	Workgroup                *string
 }

 func (c *CSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
@@ -79,5 +92,21 @@ func (c *CSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, er
 		float64(dst[0].TotalPhysicalMemory),
 	)

+	var fqdn string
+	if dst[0].Workgroup == nil || dst[0].Domain != *dst[0].Workgroup {
+		fqdn = dst[0].DNSHostname + "." + dst[0].Domain
+	} else {
+		fqdn = dst[0].DNSHostname
+	}
+
+	ch <- prometheus.MustNewConstMetric(
+		c.Hostname,
+		prometheus.GaugeValue,
+		1.0,
+		dst[0].DNSHostname,
+		dst[0].Domain,
+		fqdn,
+	)
+
 	return nil, nil
 }
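The new hostname metric is a constant 1 whose labels carry the information, and the fqdn label follows the rule in the collect method above: a domain-joined machine (no workgroup reported, or the domain differs from the workgroup) gets DNSHostname plus Domain, otherwise just the host name. A standalone sketch of that rule with hypothetical values:

```go
package main

import "fmt"

// fqdnFor mirrors the rule added to the cs collector: domain-joined machines
// report host.domain, workgroup-only machines report just the host name.
func fqdnFor(dnsHostname, domain string, workgroup *string) string {
	if workgroup == nil || domain != *workgroup {
		return dnsHostname + "." + domain
	}
	return dnsHostname
}

func main() {
	wg := "WORKGROUP"
	fmt.Println(fqdnFor("web01", "corp.example.com", nil)) // web01.corp.example.com
	fmt.Println(fqdnFor("web01", "WORKGROUP", &wg))        // web01
}
```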
collector/dns.go
@@ -11,7 +11,7 @@ import (
 )

 func init() {
-	Factories["dns"] = NewDNSCollector
+	registerCollector("dns", NewDNSCollector)
 }

 // A DNSCollector is a Prometheus collector for WMI Win32_PerfRawData_DNS_DNS metrics
@@ -181,7 +181,7 @@ func NewDNSCollector() (Collector, error) {

 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
-func (c *DNSCollector) Collect(ch chan<- prometheus.Metric) error {
+func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
 	if desc, err := c.collect(ch); err != nil {
 		log.Error("failed collecting dns metrics:", desc, err)
 		return err
collector/hyperv.go
@@ -11,7 +11,7 @@ import (
 )

 func init() {
-	Factories["hyperv"] = NewHyperVCollector
+	registerCollector("hyperv", NewHyperVCollector)
 }

 // HyperVCollector is a Prometheus collector for hyper-v
@@ -597,7 +597,7 @@ func NewHyperVCollector() (Collector, error) {

 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
-func (c *HyperVCollector) Collect(ch chan<- prometheus.Metric) error {
+func (c *HyperVCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
 	if desc, err := c.collectVmHealth(ch); err != nil {
 		log.Error("failed collecting hyperV health status metrics:", desc, err)
 		return err
@@ -16,7 +16,7 @@ import (
)

func init() {
	Factories["iis"] = NewIISCollector
	registerCollector("iis", NewIISCollector)
}

type simple_version struct {

@@ -818,7 +818,7 @@ func NewIISCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *IISCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *IISCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Error("failed collecting iis metrics:", desc, err)
		return err

@@ -6,14 +6,13 @@ import (
	"fmt"
	"regexp"

	"github.com/StackExchange/wmi"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"gopkg.in/alecthomas/kingpin.v2"
)

func init() {
	Factories["logical_disk"] = NewLogicalDiskCollector
	registerCollector("logical_disk", NewLogicalDiskCollector, "LogicalDisk")
}

var (

@@ -27,19 +26,22 @@ var (
	).Default("").String()
)

// A LogicalDiskCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfDisk_LogicalDisk metrics
// A LogicalDiskCollector is a Prometheus collector for perflib logicalDisk metrics
type LogicalDiskCollector struct {
	RequestsQueued  *prometheus.Desc
	ReadBytesTotal  *prometheus.Desc
	ReadsTotal      *prometheus.Desc
	WriteBytesTotal *prometheus.Desc
	WritesTotal     *prometheus.Desc
	ReadTime        *prometheus.Desc
	WriteTime       *prometheus.Desc
	TotalSpace      *prometheus.Desc
	FreeSpace       *prometheus.Desc
	IdleTime        *prometheus.Desc
	SplitIOs        *prometheus.Desc
	RequestsQueued   *prometheus.Desc
	ReadBytesTotal   *prometheus.Desc
	ReadsTotal       *prometheus.Desc
	WriteBytesTotal  *prometheus.Desc
	WritesTotal      *prometheus.Desc
	ReadTime         *prometheus.Desc
	WriteTime        *prometheus.Desc
	TotalSpace       *prometheus.Desc
	FreeSpace        *prometheus.Desc
	IdleTime         *prometheus.Desc
	SplitIOs         *prometheus.Desc
	ReadLatency      *prometheus.Desc
	WriteLatency     *prometheus.Desc
	ReadWriteLatency *prometheus.Desc

	volumeWhitelistPattern *regexp.Regexp
	volumeBlacklistPattern *regexp.Regexp

@@ -127,6 +129,27 @@ func NewLogicalDiskCollector() (Collector, error) {
			nil,
		),

		ReadLatency: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "read_latency_seconds_total"),
			"Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead)",
			[]string{"volume"},
			nil,
		),

		WriteLatency: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "write_latency_seconds_total"),
			"Shows the average time, in seconds, of a write operation to the disk (LogicalDisk.AvgDiskSecPerWrite)",
			[]string{"volume"},
			nil,
		),

		ReadWriteLatency: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "read_write_latency_seconds_total"),
			"Shows the time, in seconds, of the average disk transfer (LogicalDisk.AvgDiskSecPerTransfer)",
			[]string{"volume"},
			nil,
		),

		volumeWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeWhitelist)),
		volumeBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeBlacklist)),
	}, nil

@@ -134,8 +157,8 @@ func NewLogicalDiskCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *LogicalDiskCollector) Collect(ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ctx, ch); err != nil {
		log.Error("failed collecting logical_disk metrics:", desc, err)
		return err
	}

@@ -145,25 +168,27 @@ func (c *LogicalDiskCollector) Collect(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_PerfDisk_LogicalDisk docs:
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
type Win32_PerfRawData_PerfDisk_LogicalDisk struct {
type logicalDisk struct {
	Name                   string
	CurrentDiskQueueLength uint32
	DiskReadBytesPerSec    uint64
	DiskReadsPerSec        uint32
	DiskWriteBytesPerSec   uint64
	DiskWritesPerSec       uint32
	PercentDiskReadTime    uint64
	PercentDiskWriteTime   uint64
	PercentFreeSpace       uint32
	PercentFreeSpace_Base  uint32
	PercentIdleTime        uint64
	SplitIOPerSec          uint32
	CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
	DiskReadBytesPerSec    float64 `perflib:"Disk Read Bytes/sec"`
	DiskReadsPerSec        float64 `perflib:"Disk Reads/sec"`
	DiskWriteBytesPerSec   float64 `perflib:"Disk Write Bytes/sec"`
	DiskWritesPerSec       float64 `perflib:"Disk Writes/sec"`
	PercentDiskReadTime    float64 `perflib:"% Disk Read Time"`
	PercentDiskWriteTime   float64 `perflib:"% Disk Write Time"`
	PercentFreeSpace       float64 `perflib:"% Free Space_Base"`
	PercentFreeSpace_Base  float64 `perflib:"Free Megabytes"`
	PercentIdleTime        float64 `perflib:"% Idle Time"`
	SplitIOPerSec          float64 `perflib:"Split IO/Sec"`
	AvgDiskSecPerRead      float64 `perflib:"Avg. Disk sec/Read"`
	AvgDiskSecPerWrite     float64 `perflib:"Avg. Disk sec/Write"`
	AvgDiskSecPerTransfer  float64 `perflib:"Avg. Disk sec/Transfer"`
}

func (c *LogicalDiskCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []Win32_PerfRawData_PerfDisk_LogicalDisk
	q := queryAll(&dst)
	if err := wmi.Query(q, &dst); err != nil {
func (c *LogicalDiskCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []logicalDisk
	if err := unmarshalObject(ctx.perfObjects["LogicalDisk"], &dst); err != nil {
		return nil, err
	}

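A minimal sketch (not part of the diff) of the perflib mapping used above: struct tags name the Windows performance counter that is copied into each field, and unmarshalObject fills a slice from the pre-scraped object held in the ScrapeContext. The diskExample type and collectDiskExample function below are hypothetical; unmarshalObject, ScrapeContext and the "LogicalDisk" object name are taken from the hunks above.

	// diskExample is an illustrative perflib-tagged struct; each tag value is a
	// counter name as it appears in the "LogicalDisk" perflib object.
	type diskExample struct {
		Name                   string
		CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
		DiskReadBytesPerSec    float64 `perflib:"Disk Read Bytes/sec"`
	}

	func collectDiskExample(ctx *ScrapeContext) ([]diskExample, error) {
		var dst []diskExample
		// ctx.perfObjects appears to be keyed by the perflib object name that was
		// passed to registerCollector (here "LogicalDisk").
		if err := unmarshalObject(ctx.perfObjects["LogicalDisk"], &dst); err != nil {
			return nil, err
		}
		return dst, nil
	}
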
@@ -177,77 +202,98 @@ func (c *LogicalDiskCollector) collect(ch chan<- prometheus.Metric) (*prometheus
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RequestsQueued,
|
||||
prometheus.GaugeValue,
|
||||
float64(volume.CurrentDiskQueueLength),
|
||||
volume.CurrentDiskQueueLength,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.DiskReadBytesPerSec),
|
||||
volume.DiskReadBytesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.DiskReadsPerSec),
|
||||
volume.DiskReadsPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.DiskWriteBytesPerSec),
|
||||
volume.DiskWriteBytesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WritesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.DiskWritesPerSec),
|
||||
volume.DiskWritesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadTime,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.PercentDiskReadTime)*ticksToSecondsScaleFactor,
|
||||
volume.PercentDiskReadTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteTime,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.PercentDiskWriteTime)*ticksToSecondsScaleFactor,
|
||||
volume.PercentDiskWriteTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeSpace,
|
||||
prometheus.GaugeValue,
|
||||
float64(volume.PercentFreeSpace)*1024*1024,
|
||||
volume.PercentFreeSpace_Base*1024*1024,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalSpace,
|
||||
prometheus.GaugeValue,
|
||||
float64(volume.PercentFreeSpace_Base)*1024*1024,
|
||||
volume.PercentFreeSpace*1024*1024,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IdleTime,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.PercentIdleTime)*ticksToSecondsScaleFactor,
|
||||
volume.PercentIdleTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SplitIOs,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.SplitIOPerSec),
|
||||
volume.SplitIOPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerRead*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerWrite*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadWriteLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerTransfer*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
}
|
||||
|
||||
199
collector/logon.go
Normal file
@@ -0,0 +1,199 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("logon", NewLogonCollector)
|
||||
}
|
||||
|
||||
// A LogonCollector is a Prometheus collector for WMI metrics
|
||||
type LogonCollector struct {
|
||||
LogonType *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewLogonCollector ...
|
||||
func NewLogonCollector() (Collector, error) {
|
||||
const subsystem = "logon"
|
||||
|
||||
return &LogonCollector{
|
||||
LogonType: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "logon_type"),
|
||||
"Number of active logon sessions (LogonSession.LogonType)",
|
||||
[]string{"status"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogonCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting user metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_LogonSession docs:
|
||||
// - https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-logonsession
|
||||
type Win32_LogonSession struct {
|
||||
LogonType uint32
|
||||
}
|
||||
|
||||
func (c *LogonCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_LogonSession
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
// Init counters
|
||||
system := 0
|
||||
interactive := 0
|
||||
network := 0
|
||||
batch := 0
|
||||
service := 0
|
||||
proxy := 0
|
||||
unlock := 0
|
||||
networkcleartext := 0
|
||||
newcredentials := 0
|
||||
remoteinteractive := 0
|
||||
cachedinteractive := 0
|
||||
cachedremoteinteractive := 0
|
||||
cachedunlock := 0
|
||||
|
||||
for _, entry := range dst {
|
||||
switch entry.LogonType {
|
||||
case 0:
|
||||
system++
|
||||
case 2:
|
||||
interactive++
|
||||
case 3:
|
||||
network++
|
||||
case 4:
|
||||
batch++
|
||||
case 5:
|
||||
service++
|
||||
case 6:
|
||||
proxy++
|
||||
case 7:
|
||||
unlock++
|
||||
case 8:
|
||||
networkcleartext++
|
||||
case 9:
|
||||
newcredentials++
|
||||
case 10:
|
||||
remoteinteractive++
|
||||
case 11:
|
||||
cachedinteractive++
|
||||
case 12:
|
||||
cachedremoteinteractive++
|
||||
case 13:
|
||||
cachedunlock++
|
||||
}
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(system),
|
||||
"system",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(interactive),
|
||||
"interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(network),
|
||||
"network",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(batch),
|
||||
"batch",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(service),
|
||||
"service",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(proxy),
|
||||
"proxy",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(unlock),
|
||||
"unlock",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(networkcleartext),
|
||||
"network_clear_text",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(newcredentials),
|
||||
"new_credentials",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(remoteinteractive),
|
||||
"remote_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedinteractive),
|
||||
"cached_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedremoteinteractive),
|
||||
"cached_remote_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedunlock),
|
||||
"cached_unlock",
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
@@ -6,16 +6,15 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["memory"] = NewMemoryCollector
|
||||
registerCollector("memory", NewMemoryCollector, "Memory")
|
||||
}
|
||||
|
||||
// A MemoryCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfOS_Memory metrics
|
||||
// A MemoryCollector is a Prometheus collector for perflib Memory metrics
|
||||
type MemoryCollector struct {
|
||||
AvailableBytes *prometheus.Desc
|
||||
CacheBytes *prometheus.Desc
|
||||
@@ -256,248 +255,247 @@ func NewMemoryCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MemoryCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting memory metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_PerfOS_Memory struct {
|
||||
AvailableBytes uint64
|
||||
AvailableKBytes uint64
|
||||
AvailableMBytes uint64
|
||||
CacheBytes uint64
|
||||
CacheBytesPeak uint64
|
||||
CacheFaultsPersec uint32
|
||||
CommitLimit uint64
|
||||
CommittedBytes uint64
|
||||
DemandZeroFaultsPersec uint32
|
||||
FreeAndZeroPageListBytes uint64
|
||||
FreeSystemPageTableEntries uint32
|
||||
ModifiedPageListBytes uint64
|
||||
PageFaultsPersec uint32
|
||||
PageReadsPersec uint32
|
||||
PagesInputPersec uint32
|
||||
PagesOutputPersec uint32
|
||||
PagesPersec uint32
|
||||
PageWritesPersec uint32
|
||||
PoolNonpagedAllocs uint32
|
||||
PoolNonpagedBytes uint64
|
||||
PoolPagedAllocs uint32
|
||||
PoolPagedBytes uint64
|
||||
PoolPagedResidentBytes uint64
|
||||
StandbyCacheCoreBytes uint64
|
||||
StandbyCacheNormalPriorityBytes uint64
|
||||
StandbyCacheReserveBytes uint64
|
||||
SystemCacheResidentBytes uint64
|
||||
SystemCodeResidentBytes uint64
|
||||
SystemCodeTotalBytes uint64
|
||||
SystemDriverResidentBytes uint64
|
||||
SystemDriverTotalBytes uint64
|
||||
TransitionFaultsPersec uint32
|
||||
TransitionPagesRePurposedPersec uint32
|
||||
WriteCopiesPersec uint32
|
||||
type memory struct {
|
||||
AvailableBytes float64 `perflib:"Available Bytes"`
|
||||
AvailableKBytes float64 `perflib:"Available KBytes"`
|
||||
AvailableMBytes float64 `perflib:"Available MBytes"`
|
||||
CacheBytes float64 `perflib:"Cache Bytes"`
|
||||
CacheBytesPeak float64 `perflib:"Cache Bytes Peak"`
|
||||
CacheFaultsPersec float64 `perflib:"Cache Faults/sec"`
|
||||
CommitLimit float64 `perflib:"Commit Limit"`
|
||||
CommittedBytes float64 `perflib:"Committed Bytes"`
|
||||
DemandZeroFaultsPersec float64 `perflib:"Demand Zero Faults/sec"`
|
||||
FreeAndZeroPageListBytes float64 `perflib:"Free & Zero Page List Bytes"`
|
||||
FreeSystemPageTableEntries float64 `perflib:"Free System Page Table Entries"`
|
||||
ModifiedPageListBytes float64 `perflib:"Modified Page List Bytes"`
|
||||
PageFaultsPersec float64 `perflib:"Page Faults/sec"`
|
||||
PageReadsPersec float64 `perflib:"Page Reads/sec"`
|
||||
PagesInputPersec float64 `perflib:"Pages Input/sec"`
|
||||
PagesOutputPersec float64 `perflib:"Pages Output/sec"`
|
||||
PagesPersec float64 `perflib:"Pages/sec"`
|
||||
PageWritesPersec float64 `perflib:"Page Writes/sec"`
|
||||
PoolNonpagedAllocs float64 `perflib:"Pool Nonpaged Allocs"`
|
||||
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
|
||||
PoolPagedAllocs float64 `perflib:"Pool Paged Allocs"`
|
||||
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
|
||||
PoolPagedResidentBytes float64 `perflib:"Pool Paged Resident Bytes"`
|
||||
StandbyCacheCoreBytes float64 `perflib:"Standby Cache Core Bytes"`
|
||||
StandbyCacheNormalPriorityBytes float64 `perflib:"Standby Cache Normal Priority Bytes"`
|
||||
StandbyCacheReserveBytes float64 `perflib:"Standby Cache Reserve Bytes"`
|
||||
SystemCacheResidentBytes float64 `perflib:"System Cache Resident Bytes"`
|
||||
SystemCodeResidentBytes float64 `perflib:"System Code Resident Bytes"`
|
||||
SystemCodeTotalBytes float64 `perflib:"System Code Total Bytes"`
|
||||
SystemDriverResidentBytes float64 `perflib:"System Driver Resident Bytes"`
|
||||
SystemDriverTotalBytes float64 `perflib:"System Driver Total Bytes"`
|
||||
TransitionFaultsPersec float64 `perflib:"Transition Faults/sec"`
|
||||
TransitionPagesRePurposedPersec float64 `perflib:"Transition Pages RePurposed/sec"`
|
||||
WriteCopiesPersec float64 `perflib:"Write Copies/sec"`
|
||||
}
|
||||
|
||||
func (c *MemoryCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfOS_Memory
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []memory
|
||||
if err := unmarshalObject(ctx.perfObjects["Memory"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AvailableBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].AvailableBytes),
|
||||
dst[0].AvailableBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CacheBytes),
|
||||
dst[0].CacheBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheBytesPeak,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CacheBytesPeak),
|
||||
dst[0].CacheBytesPeak,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CacheFaultsPersec),
|
||||
dst[0].CacheFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CommitLimit,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CommitLimit),
|
||||
dst[0].CommitLimit,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CommittedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CommittedBytes),
|
||||
dst[0].CommittedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DemandZeroFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].DemandZeroFaultsPersec),
|
||||
dst[0].DemandZeroFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeAndZeroPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].FreeAndZeroPageListBytes),
|
||||
dst[0].FreeAndZeroPageListBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeSystemPageTableEntries,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].FreeSystemPageTableEntries),
|
||||
dst[0].FreeSystemPageTableEntries,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ModifiedPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].ModifiedPageListBytes),
|
||||
dst[0].ModifiedPageListBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PageFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PageFaultsPersec),
|
||||
dst[0].PageFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageReadsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PageReadsPersec),
|
||||
dst[0].PageReadsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPagesReadTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PagesInputPersec),
|
||||
dst[0].PagesInputPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPagesWrittenTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PagesOutputPersec),
|
||||
dst[0].PagesOutputPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageOperationsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PagesPersec),
|
||||
dst[0].PagesPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageWritesTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PageWritesPersec),
|
||||
dst[0].PageWritesPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolNonpagedAllocsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolNonpagedAllocs),
|
||||
dst[0].PoolNonpagedAllocs,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolNonpagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolNonpagedBytes),
|
||||
dst[0].PoolNonpagedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedAllocsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolPagedAllocs),
|
||||
dst[0].PoolPagedAllocs,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolPagedBytes),
|
||||
dst[0].PoolPagedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolPagedResidentBytes),
|
||||
dst[0].PoolPagedResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheCoreBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].StandbyCacheCoreBytes),
|
||||
dst[0].StandbyCacheCoreBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheNormalPriorityBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].StandbyCacheNormalPriorityBytes),
|
||||
dst[0].StandbyCacheNormalPriorityBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheReserveBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].StandbyCacheReserveBytes),
|
||||
dst[0].StandbyCacheReserveBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCacheResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemCacheResidentBytes),
|
||||
dst[0].SystemCacheResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCodeResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemCodeResidentBytes),
|
||||
dst[0].SystemCodeResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCodeTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemCodeTotalBytes),
|
||||
dst[0].SystemCodeTotalBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemDriverResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemDriverResidentBytes),
|
||||
dst[0].SystemDriverResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemDriverTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemDriverTotalBytes),
|
||||
dst[0].SystemDriverTotalBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransitionFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TransitionFaultsPersec),
|
||||
dst[0].TransitionFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransitionPagesRepurposedTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TransitionPagesRePurposedPersec),
|
||||
dst[0].TransitionPagesRePurposedPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteCopiesTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].WriteCopiesPersec),
|
||||
dst[0].WriteCopiesPersec,
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
)

func init() {
	Factories["msmq"] = NewMSMQCollector
	registerCollector("msmq", NewMSMQCollector)
}

var (

@@ -33,7 +33,7 @@ type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
func NewMSMQCollector() (Collector, error) {
	const subsystem = "msmq"

	if *msmqWhereClause != "" {
	if *msmqWhereClause == "" {
		log.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
	}

@@ -68,7 +68,7 @@ func NewMSMQCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Error("failed collecting msmq metrics:", desc, err)
		return err

@@ -91,7 +91,7 @@ func mssqlBuildWMIInstanceClass(suffix string, instance string) string {
type mssqlCollectorsMap map[string]mssqlCollectorFunc

func mssqlAvailableClassCollectors() string {
	return "accessmethods,availreplica,bufman,databases,dbreplica,genstats,locks,memmgr,sqlstats"
	return "accessmethods,availreplica,bufman,databases,dbreplica,genstats,locks,memmgr,sqlstats,sqlerrors,transactions"
}

func (c *MSSQLCollector) getMSSQLCollectors() mssqlCollectorsMap {

@@ -105,6 +105,8 @@ func (c *MSSQLCollector) getMSSQLCollectors() mssqlCollectorsMap {
	mssqlCollectors["locks"] = c.collectLocks
	mssqlCollectors["memmgr"] = c.collectMemoryManager
	mssqlCollectors["sqlstats"] = c.collectSQLStats
	mssqlCollectors["sqlerrors"] = c.collectSQLErrors
	mssqlCollectors["transactions"] = c.collectTransactions

	return mssqlCollectors
}

@@ -125,7 +127,7 @@ func mssqlExpandEnabledCollectors(enabled string) []string {
}

func init() {
	Factories["mssql"] = NewMSSQLCollector
	registerCollector("mssql", NewMSSQLCollector)
}

// A MSSQLCollector is a Prometheus collector for various WMI Win32_PerfRawData_MSSQLSERVER_* metrics
|
||||
@@ -177,7 +179,8 @@ type MSSQLCollector struct {
|
||||
AccessMethodsUsedtreepagecookie *prometheus.Desc
|
||||
AccessMethodsWorkfilesCreated *prometheus.Desc
|
||||
AccessMethodsWorktablesCreated *prometheus.Desc
|
||||
AccessMethodsWorktablesFromCacheRatio *prometheus.Desc
|
||||
AccessMethodsWorktablesFromCacheHits *prometheus.Desc
|
||||
AccessMethodsWorktablesFromCacheLookups *prometheus.Desc
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerAvailabilityReplica
|
||||
AvailReplicaBytesReceivedfromReplica *prometheus.Desc
|
||||
@@ -192,7 +195,8 @@ type MSSQLCollector struct {
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerBufferManager
|
||||
BufManBackgroundwriterpages *prometheus.Desc
|
||||
BufManBuffercachehitratio *prometheus.Desc
|
||||
BufManBuffercachehits *prometheus.Desc
|
||||
BufManBuffercachelookups *prometheus.Desc
|
||||
BufManCheckpointpages *prometheus.Desc
|
||||
BufManDatabasepages *prometheus.Desc
|
||||
BufManExtensionallocatedpages *prometheus.Desc
|
||||
@@ -250,7 +254,8 @@ type MSSQLCollector struct {
|
||||
DatabasesDBCCLogicalScanBytes *prometheus.Desc
|
||||
DatabasesGroupCommitTime *prometheus.Desc
|
||||
DatabasesLogBytesFlushed *prometheus.Desc
|
||||
DatabasesLogCacheHitRatio *prometheus.Desc
|
||||
DatabasesLogCacheHits *prometheus.Desc
|
||||
DatabasesLogCacheLookups *prometheus.Desc
|
||||
DatabasesLogCacheReads *prometheus.Desc
|
||||
DatabasesLogFilesSizeKB *prometheus.Desc
|
||||
DatabasesLogFilesUsedSizeKB *prometheus.Desc
|
||||
@@ -315,7 +320,8 @@ type MSSQLCollector struct {
|
||||
GenStatsUserConnections *prometheus.Desc
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerLocks
|
||||
LocksAverageWaitTimems *prometheus.Desc
|
||||
LocksWaitTime *prometheus.Desc
|
||||
LocksCount *prometheus.Desc
|
||||
LocksLockRequests *prometheus.Desc
|
||||
LocksLockTimeouts *prometheus.Desc
|
||||
LocksLockTimeoutstimeout0 *prometheus.Desc
|
||||
@@ -358,6 +364,24 @@ type MSSQLCollector struct {
|
||||
SQLStatsSQLReCompilations *prometheus.Desc
|
||||
SQLStatsUnsafeAutoParams *prometheus.Desc
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerSQLErrors
|
||||
SQLErrorsTotal *prometheus.Desc
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerTransactions
|
||||
TransactionsTempDbFreeSpaceBytes *prometheus.Desc
|
||||
TransactionsLongestTransactionRunningSeconds *prometheus.Desc
|
||||
TransactionsNonSnapshotVersionActiveTotal *prometheus.Desc
|
||||
TransactionsSnapshotActiveTotal *prometheus.Desc
|
||||
TransactionsActiveTotal *prometheus.Desc
|
||||
TransactionsUpdateConflictsTotal *prometheus.Desc
|
||||
TransactionsUpdateSnapshotActiveTotal *prometheus.Desc
|
||||
TransactionsVersionCleanupRateBytes *prometheus.Desc
|
||||
TransactionsVersionGenerationRateBytes *prometheus.Desc
|
||||
TransactionsVersionStoreSizeBytes *prometheus.Desc
|
||||
TransactionsVersionStoreUnits *prometheus.Desc
|
||||
TransactionsVersionStoreCreationUnits *prometheus.Desc
|
||||
TransactionsVersionStoreTruncationUnits *prometheus.Desc
|
||||
|
||||
mssqlInstances mssqlInstancesType
|
||||
mssqlCollectors mssqlCollectorsMap
|
||||
mssqlChildCollectorFailure int
|
||||
@@ -636,12 +660,18 @@ func NewMSSQLCollector() (Collector, error) {
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
AccessMethodsWorktablesFromCacheRatio: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "accessmethods_worktables_from_cache_ratio"),
|
||||
AccessMethodsWorktablesFromCacheHits: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "accessmethods_worktables_from_cache_hits"),
|
||||
"(AccessMethods.WorktablesFromCacheRatio)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
AccessMethodsWorktablesFromCacheLookups: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "accessmethods_worktables_from_cache_lookups"),
|
||||
"(AccessMethods.WorktablesFromCacheRatio_Base)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerAvailabilityReplica
|
||||
AvailReplicaBytesReceivedfromReplica: prometheus.NewDesc(
|
||||
@@ -706,12 +736,18 @@ func NewMSSQLCollector() (Collector, error) {
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
BufManBuffercachehitratio: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bufman_buffer_cache_hit_ratio"),
|
||||
BufManBuffercachehits: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bufman_buffer_cache_hits"),
|
||||
"(BufferManager.Buffercachehitratio)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
BufManBuffercachelookups: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bufman_buffer_cache_lookups"),
|
||||
"(BufferManager.Buffercachehitratio_Base)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
BufManCheckpointpages: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bufman_checkpoint_pages"),
|
||||
"(BufferManager.Checkpointpages)",
|
||||
@@ -1034,12 +1070,18 @@ func NewMSSQLCollector() (Collector, error) {
|
||||
[]string{"instance", "database"},
|
||||
nil,
|
||||
),
|
||||
DatabasesLogCacheHitRatio: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "databases_log_cache_hit_ratio"),
|
||||
DatabasesLogCacheHits: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "databases_log_cache_hits"),
|
||||
"(Databases.LogCacheHitRatio)",
|
||||
[]string{"instance", "database"},
|
||||
nil,
|
||||
),
|
||||
DatabasesLogCacheLookups: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "databases_log_cache_lookups"),
|
||||
"(Databases.LogCacheHitRatio_Base)",
|
||||
[]string{"instance", "database"},
|
||||
nil,
|
||||
),
|
||||
DatabasesLogCacheReads: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "databases_log_cache_reads"),
|
||||
"(Databases.LogCacheReads)",
|
||||
@@ -1404,9 +1446,15 @@ func NewMSSQLCollector() (Collector, error) {
|
||||
),
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerLocks
|
||||
LocksAverageWaitTimems: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "locks_average_wait_seconds"),
|
||||
"(Locks.AverageWaitTimems)",
|
||||
LocksWaitTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "locks_wait_time_seconds"),
|
||||
"(Locks.AverageWaitTimems Total time in seconds which locks have been holding resources)",
|
||||
[]string{"instance", "resource"},
|
||||
nil,
|
||||
),
|
||||
LocksCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "locks_count"),
|
||||
"(Locks.AverageWaitTimems_Base count of how often requests have run into locks)",
|
||||
[]string{"instance", "resource"},
|
||||
nil,
|
||||
),
|
||||
@@ -1637,6 +1685,94 @@ func NewMSSQLCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerSQLErrors
|
||||
SQLErrorsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "sql_errors_total"),
|
||||
"(SQLErrors.Total)",
|
||||
[]string{"instance", "resource"},
|
||||
nil,
|
||||
),
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerTransactions
|
||||
TransactionsTempDbFreeSpaceBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_tempdb_free_space_bytes"),
|
||||
"(Transactions.FreeSpaceInTempDbKB)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsLongestTransactionRunningSeconds: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_longest_transaction_running_seconds"),
|
||||
"(Transactions.LongestTransactionRunningTime)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsNonSnapshotVersionActiveTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_nonsnapshot_version_active_total"),
|
||||
"(Transactions.NonSnapshotVersionTransactions)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsSnapshotActiveTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_snapshot_active_total"),
|
||||
"(Transactions.SnapshotTransactions)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsActiveTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_active_total"),
|
||||
"(Transactions.Transactions)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsUpdateConflictsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_update_conflicts_total"),
|
||||
"(Transactions.UpdateConflictRatio)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsUpdateSnapshotActiveTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_update_snapshot_active_total"),
|
||||
"(Transactions.UpdateSnapshotTransactions)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionCleanupRateBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_cleanup_rate_bytes"),
|
||||
"(Transactions.VersionCleanupRateKBs)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionGenerationRateBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_generation_rate_bytes"),
|
||||
"(Transactions.VersionGenerationRateKBs)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionStoreSizeBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_store_size_bytes"),
|
||||
"(Transactions.VersionStoreSizeKB)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionStoreUnits: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_store_units"),
|
||||
"(Transactions.VersionStoreUnitCount)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionStoreCreationUnits: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_store_creation_units"),
|
||||
"(Transactions.VersionStoreUnitCreation)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionStoreTruncationUnits: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_store_truncation_units"),
|
||||
"(Transactions.VersionStoreUnitTruncation)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
|
||||
mssqlInstances: getMSSQLInstances(),
|
||||
}
|
||||
|
||||
@@ -1687,7 +1823,7 @@ func (c *MSSQLCollector) execute(name string, fn mssqlCollectorFunc, ch chan<- p
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MSSQLCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *MSSQLCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
enabled := mssqlExpandEnabledCollectors(*mssqlEnabledCollectors)
|
||||
@@ -1755,6 +1891,7 @@ type win32PerfRawDataSQLServerAccessMethods struct {
|
||||
WorkfilesCreatedPersec uint64
|
||||
WorktablesCreatedPersec uint64
|
||||
WorktablesFromCacheRatio uint64
|
||||
WorktablesFromCacheRatio_Base uint64
|
||||
}
|
||||
|
||||
func (c *MSSQLCollector) collectAccessMethods(ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
|
||||
@@ -2067,11 +2204,18 @@ func (c *MSSQLCollector) collectAccessMethods(ch chan<- prometheus.Metric, sqlIn
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AccessMethodsWorktablesFromCacheRatio,
|
||||
c.AccessMethodsWorktablesFromCacheHits,
|
||||
prometheus.CounterValue,
|
||||
float64(v.WorktablesFromCacheRatio),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AccessMethodsWorktablesFromCacheLookups,
|
||||
prometheus.CounterValue,
|
||||
float64(v.WorktablesFromCacheRatio_Base),
|
||||
sqlInstance,
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -2174,6 +2318,7 @@ func (c *MSSQLCollector) collectAvailabilityReplica(ch chan<- prometheus.Metric,
|
||||
type win32PerfRawDataSQLServerBufferManager struct {
|
||||
BackgroundwriterpagesPersec uint64
|
||||
Buffercachehitratio uint64
|
||||
Buffercachehitratio_Base uint64
|
||||
CheckpointpagesPersec uint64
|
||||
Databasepages uint64
|
||||
Extensionallocatedpages uint64
|
||||
@@ -2219,12 +2364,19 @@ func (c *MSSQLCollector) collectBufferManager(ch chan<- prometheus.Metric, sqlIn
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BufManBuffercachehitratio,
|
||||
c.BufManBuffercachehits,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Buffercachehitratio),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BufManBuffercachelookups,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Buffercachehitratio_Base),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BufManCheckpointpages,
|
||||
prometheus.CounterValue,
|
||||
@@ -2575,7 +2727,7 @@ func (c *MSSQLCollector) collectDatabaseReplica(ch chan<- prometheus.Metric, sql
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DBReplicaTransactionDelay,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.TransactionDelay)*1000.0,
|
||||
float64(v.TransactionDelay)/1000.0,
|
||||
sqlInstance, replicaName,
|
||||
)
|
||||
}
|
||||
@@ -2596,6 +2748,7 @@ type win32PerfRawDataSQLServerDatabases struct {
|
||||
GroupCommitTimePersec uint64
|
||||
LogBytesFlushedPersec uint64
|
||||
LogCacheHitRatio uint64
|
||||
LogCacheHitRatio_Base uint64
|
||||
LogCacheReadsPersec uint64
|
||||
LogFilesSizeKB uint64
|
||||
LogFilesUsedSizeKB uint64
|
||||
@@ -2711,12 +2864,19 @@ func (c *MSSQLCollector) collectDatabases(ch chan<- prometheus.Metric, sqlInstan
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DatabasesLogCacheHitRatio,
|
||||
c.DatabasesLogCacheHits,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.LogCacheHitRatio),
|
||||
sqlInstance, dbName,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DatabasesLogCacheLookups,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.LogCacheHitRatio_Base),
|
||||
sqlInstance, dbName,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DatabasesLogCacheReads,
|
||||
prometheus.CounterValue,
|
||||
@@ -3191,6 +3351,7 @@ func (c *MSSQLCollector) collectGeneralStatistics(ch chan<- prometheus.Metric, s
|
||||
type win32PerfRawDataSQLServerLocks struct {
|
||||
Name string
|
||||
AverageWaitTimems uint64
|
||||
AverageWaitTimems_Base uint64
|
||||
LockRequestsPersec uint64
|
||||
LockTimeoutsPersec uint64
|
||||
LockTimeoutstimeout0Persec uint64
|
||||
@@ -3213,12 +3374,19 @@ func (c *MSSQLCollector) collectLocks(ch chan<- prometheus.Metric, sqlInstance s
|
||||
lockResourceName := v.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LocksAverageWaitTimems,
|
||||
c.LocksWaitTime,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.AverageWaitTimems)/1000.0,
|
||||
sqlInstance, lockResourceName,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LocksCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.AverageWaitTimems_Base)/1000.0,
|
||||
sqlInstance, lockResourceName,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LocksLockRequests,
|
||||
prometheus.CounterValue,
|
||||
@@ -3558,3 +3726,162 @@ func (c *MSSQLCollector) collectSQLStats(ch chan<- prometheus.Metric, sqlInstanc
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type win32PerfRawDataSQLServerSQLErrors struct {
|
||||
Name string
|
||||
ErrorsPersec uint64
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerErrors docs:
|
||||
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object
|
||||
func (c *MSSQLCollector) collectSQLErrors(ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
|
||||
var dst []win32PerfRawDataSQLServerSQLErrors
|
||||
log.Debugf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance)
|
||||
|
||||
class := mssqlBuildWMIInstanceClass("SQLErrors", sqlInstance)
|
||||
q := queryAllForClassWhere(&dst, class, `Name <> '_Total'`)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, v := range dst {
|
||||
resource := v.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SQLErrorsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(v.ErrorsPersec),
|
||||
sqlInstance, resource,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type win32PerfRawDataSqlServerTransactions struct {
|
||||
FreeSpaceintempdbKB uint64
|
||||
LongestTransactionRunningTime uint64
|
||||
NonSnapshotVersionTransactions uint64
|
||||
SnapshotTransactions uint64
|
||||
Transactions uint64
|
||||
Updateconflictratio uint64
|
||||
UpdateSnapshotTransactions uint64
|
||||
VersionCleanuprateKBPers uint64
|
||||
VersionGenerationrateKBPers uint64
|
||||
VersionStoreSizeKB uint64
|
||||
VersionStoreunitcount uint64
|
||||
VersionStoreunitcreation uint64
|
||||
VersionStoreunittruncation uint64
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_Transactions docs:
|
||||
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
|
||||
func (c *MSSQLCollector) collectTransactions(ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
|
||||
var dst []win32PerfRawDataSqlServerTransactions
|
||||
log.Debugf("mssql_transactions collector iterating sql instance %s.", sqlInstance)
|
||||
|
||||
class := mssqlBuildWMIInstanceClass("Transactions", sqlInstance)
|
||||
q := queryAllForClass(&dst, class)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
v := dst[0]
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsTempDbFreeSpaceBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.FreeSpaceintempdbKB*1024),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsLongestTransactionRunningSeconds,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.LongestTransactionRunningTime),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsNonSnapshotVersionActiveTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(v.NonSnapshotVersionTransactions),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsSnapshotActiveTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(v.SnapshotTransactions),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsActiveTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(v.Transactions),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsUpdateConflictsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(v.Updateconflictratio),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsUpdateSnapshotActiveTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(v.UpdateSnapshotTransactions),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsVersionCleanupRateBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.VersionCleanuprateKBPers*1024),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsVersionGenerationRateBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.VersionGenerationrateKBPers*1024),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsVersionStoreSizeBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.VersionStoreSizeKB*1024),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsVersionStoreUnits,
|
||||
prometheus.CounterValue,
|
||||
float64(v.VersionStoreunitcount),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsVersionStoreCreationUnits,
|
||||
prometheus.CounterValue,
|
||||
float64(v.VersionStoreunitcreation),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransactionsVersionStoreTruncationUnits,
|
||||
prometheus.CounterValue,
|
||||
float64(v.VersionStoreunittruncation),
|
||||
sqlInstance,
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -6,14 +6,13 @@ import (
	"fmt"
	"regexp"

	"github.com/StackExchange/wmi"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"gopkg.in/alecthomas/kingpin.v2"
)

func init() {
	Factories["net"] = NewNetworkCollector
	registerCollector("net", NewNetworkCollector, "Network Interface")
}

var (

@@ -28,7 +27,7 @@ var (
	nicNameToUnderscore = regexp.MustCompile("[^a-zA-Z0-9]")
)

// A NetworkCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_NetworkInterface metrics
// A NetworkCollector is a Prometheus collector for Perflib Network Interface metrics
type NetworkCollector struct {
	BytesReceivedTotal *prometheus.Desc
	BytesSentTotal     *prometheus.Desc

@@ -132,8 +131,8 @@ func NewNetworkCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NetworkCollector) Collect(ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ctx, ch); err != nil {
		log.Error("failed collecting net metrics:", desc, err)
		return err
	}

@@ -141,34 +140,33 @@ func (c *NetworkCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
}
|
||||
|
||||
// mangleNetworkName mangles Network Adapter name (non-alphanumeric to _)
|
||||
// that is used in Win32_PerfRawData_Tcpip_NetworkInterface.
|
||||
// that is used in networkInterface.
|
||||
func mangleNetworkName(name string) string {
|
||||
return nicNameToUnderscore.ReplaceAllString(name, "_")
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_Tcpip_NetworkInterface docs:
|
||||
// - https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)
|
||||
type Win32_PerfRawData_Tcpip_NetworkInterface struct {
|
||||
BytesReceivedPerSec uint64
|
||||
BytesSentPerSec uint64
|
||||
BytesTotalPerSec uint64
|
||||
type networkInterface struct {
|
||||
BytesReceivedPerSec float64 `perflib:"Bytes Received/sec"`
|
||||
BytesSentPerSec float64 `perflib:"Bytes Sent/sec"`
|
||||
BytesTotalPerSec float64 `perflib:"Bytes Total/sec"`
|
||||
Name string
|
||||
PacketsOutboundDiscarded uint64
|
||||
PacketsOutboundErrors uint64
|
||||
PacketsPerSec uint64
|
||||
PacketsReceivedDiscarded uint64
|
||||
PacketsReceivedErrors uint64
|
||||
PacketsReceivedPerSec uint64
|
||||
PacketsReceivedUnknown uint64
|
||||
PacketsSentPerSec uint64
|
||||
CurrentBandwidth uint64
|
||||
PacketsOutboundDiscarded float64 `perflib:"Packets Outbound Discarded"`
|
||||
PacketsOutboundErrors float64 `perflib:"Packets Outbound Errors"`
|
||||
PacketsPerSec float64 `perflib:"Packets/sec"`
|
||||
PacketsReceivedDiscarded float64 `perflib:"Packets Received Discarded"`
|
||||
PacketsReceivedErrors float64 `perflib:"Packets Received Errors"`
|
||||
PacketsReceivedPerSec float64 `perflib:"Packets Received/sec"`
|
||||
PacketsReceivedUnknown float64 `perflib:"Packets Received Unknown"`
|
||||
PacketsSentPerSec float64 `perflib:"Packets Sent/sec"`
|
||||
CurrentBandwidth float64 `perflib:"Current Bandwidth"`
|
||||
}
|
||||
|
||||
func (c *NetworkCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_Tcpip_NetworkInterface
|
||||
func (c *NetworkCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []networkInterface
|
||||
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
if err := unmarshalObject(ctx.perfObjects["Network Interface"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -187,76 +185,75 @@ func (c *NetworkCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Des
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesReceivedPerSec),
|
||||
nic.BytesReceivedPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSentTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesSentPerSec),
|
||||
nic.BytesSentPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesTotalPerSec),
|
||||
nic.BytesTotalPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsOutboundDiscarded,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsOutboundDiscarded),
|
||||
nic.PacketsOutboundDiscarded,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsOutboundErrors,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsOutboundErrors),
|
||||
nic.PacketsOutboundErrors,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsPerSec),
|
||||
nic.PacketsPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedDiscarded,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedDiscarded),
|
||||
nic.PacketsReceivedDiscarded,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedErrors,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedErrors),
|
||||
nic.PacketsReceivedErrors,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedPerSec),
|
||||
nic.PacketsReceivedPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedUnknown,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedUnknown),
|
||||
nic.PacketsReceivedUnknown,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSentTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsSentPerSec),
|
||||
nic.PacketsSentPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentBandwidth,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.CurrentBandwidth),
|
||||
prometheus.GaugeValue,
|
||||
nic.CurrentBandwidth,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrexceptions"] = NewNETFramework_NETCLRExceptionsCollector
|
||||
registerCollector("netframework_clrexceptions", NewNETFramework_NETCLRExceptionsCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRExceptionsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRExceptionsCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrinterop"] = NewNETFramework_NETCLRInteropCollector
|
||||
registerCollector("netframework_clrinterop", NewNETFramework_NETCLRInteropCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRInteropCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics
|
||||
@@ -46,7 +46,7 @@ func NewNETFramework_NETCLRInteropCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrinterop metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrjit"] = NewNETFramework_NETCLRJitCollector
|
||||
registerCollector("netframework_clrjit", NewNETFramework_NETCLRJitCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRJitCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRJitCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrjit metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrloading"] = NewNETFramework_NETCLRLoadingCollector
|
||||
registerCollector("netframework_clrloading", NewNETFramework_NETCLRLoadingCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRLoadingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics
|
||||
@@ -88,7 +88,7 @@ func NewNETFramework_NETCLRLoadingCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrloading metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrlocksandthreads"] = NewNETFramework_NETCLRLocksAndThreadsCollector
|
||||
registerCollector("netframework_clrlocksandthreads", NewNETFramework_NETCLRLocksAndThreadsCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRLocksAndThreadsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics
|
||||
@@ -74,7 +74,7 @@ func NewNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrmemory"] = NewNETFramework_NETCLRMemoryCollector
|
||||
registerCollector("netframework_clrmemory", NewNETFramework_NETCLRMemoryCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRMemoryCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics
|
||||
@@ -112,7 +112,7 @@ func NewNETFramework_NETCLRMemoryCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrmemory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrremoting"] = NewNETFramework_NETCLRRemotingCollector
|
||||
registerCollector("netframework_clrremoting", NewNETFramework_NETCLRRemotingCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRRemotingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics
|
||||
@@ -67,7 +67,7 @@ func NewNETFramework_NETCLRRemotingCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrremoting metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrsecurity"] = NewNETFramework_NETCLRSecurityCollector
|
||||
registerCollector("netframework_clrsecurity", NewNETFramework_NETCLRSecurityCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRSecurityCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRSecurityCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -12,11 +12,12 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["os"] = NewOSCollector
|
||||
registerCollector("os", NewOSCollector)
|
||||
}
|
||||
|
||||
// A OSCollector is a Prometheus collector for WMI metrics
|
||||
type OSCollector struct {
|
||||
OSInformation *prometheus.Desc
|
||||
PhysicalMemoryFreeBytes *prometheus.Desc
|
||||
PagingFreeBytes *prometheus.Desc
|
||||
VirtualMemoryFreeBytes *prometheus.Desc
|
||||
@@ -36,6 +37,12 @@ func NewOSCollector() (Collector, error) {
|
||||
const subsystem = "os"
|
||||
|
||||
return &OSCollector{
|
||||
OSInformation: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "info"),
|
||||
"OperatingSystem.Caption, OperatingSystem.Version",
|
||||
[]string{"product", "version"},
|
||||
nil,
|
||||
),
|
||||
PagingLimitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "paging_limit_bytes"),
|
||||
"OperatingSystem.SizeStoredInPagingFiles",
|
||||
@@ -113,7 +120,7 @@ func NewOSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting os metrics:", desc, err)
|
||||
return err
|
||||
@@ -124,9 +131,11 @@ func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
// Win32_OperatingSystem docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
|
||||
type Win32_OperatingSystem struct {
|
||||
Caption string
|
||||
FreePhysicalMemory uint64
|
||||
FreeSpaceInPagingFiles uint64
|
||||
FreeVirtualMemory uint64
|
||||
LocalDateTime time.Time
|
||||
MaxNumberOfProcesses uint32
|
||||
MaxProcessMemorySize uint64
|
||||
NumberOfProcesses uint32
|
||||
@@ -134,7 +143,7 @@ type Win32_OperatingSystem struct {
|
||||
SizeStoredInPagingFiles uint64
|
||||
TotalVirtualMemorySize uint64
|
||||
TotalVisibleMemorySize uint64
|
||||
LocalDateTime time.Time
|
||||
Version string
|
||||
}
|
||||
|
||||
func (c *OSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
@@ -148,6 +157,14 @@ func (c *OSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, er
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.OSInformation,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
dst[0].Caption,
|
||||
dst[0].Version,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PhysicalMemoryFreeBytes,
|
||||
prometheus.GaugeValue,
|
||||
|
||||
collector/perflib.go (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
perflibCollector "github.com/leoluk/perflib_exporter/collector"
|
||||
"github.com/leoluk/perflib_exporter/perflib"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
var nametable = perflib.QueryNameTable("Counter 009") // Reads the names in English. TODO: validate that the English names are always present
|
||||
|
||||
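// MapCounterToIndex resolves an English counter/object name to its numeric
// registry index, returned as a string for use in perflib queries.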
func MapCounterToIndex(name string) string {
|
||||
return strconv.Itoa(int(nametable.LookupIndex(name)))
|
||||
}
|
||||
|
||||
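// getPerflibSnapshot queries the requested perflib objects once and indexes the
// result by object name, so collectors can share a single snapshot.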
func getPerflibSnapshot(objNames string) (map[string]*perflib.PerfObject, error) {
|
||||
objects, err := perflib.QueryPerformanceData(objNames)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indexed := make(map[string]*perflib.PerfObject)
|
||||
for _, obj := range objects {
|
||||
indexed[obj.Name] = obj
|
||||
}
|
||||
return indexed, nil
|
||||
}
|
||||
|
||||
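// unmarshalObject decodes a perflib object into the slice pointed to by vs,
// producing one element per instance and matching struct fields to counters
// via their `perflib:"..."` tags. Tagged fields must be of type float64.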
func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
|
||||
if obj == nil {
|
||||
return fmt.Errorf("counter not found")
|
||||
}
|
||||
rv := reflect.ValueOf(vs)
|
||||
if rv.Kind() != reflect.Ptr || rv.IsNil() {
|
||||
return fmt.Errorf("%v is nil or not a pointer to slice", reflect.TypeOf(vs))
|
||||
}
|
||||
ev := rv.Elem()
|
||||
if ev.Kind() != reflect.Slice {
|
||||
return fmt.Errorf("%v is not slice", reflect.TypeOf(vs))
|
||||
}
|
||||
|
||||
// Ensure sufficient length
|
||||
if ev.Cap() < len(obj.Instances) {
|
||||
nvs := reflect.MakeSlice(ev.Type(), len(obj.Instances), len(obj.Instances))
|
||||
ev.Set(nvs)
|
||||
}
|
||||
|
||||
for idx, instance := range obj.Instances {
|
||||
target := ev.Index(idx)
|
||||
rt := target.Type()
|
||||
|
||||
counters := make(map[string]*perflib.PerfCounter, len(instance.Counters))
|
||||
for _, ctr := range instance.Counters {
|
||||
if ctr.Def.IsBaseValue && !ctr.Def.IsNanosecondCounter {
|
||||
counters[ctr.Def.Name+"_Base"] = ctr
|
||||
} else {
|
||||
counters[ctr.Def.Name] = ctr
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < target.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
tag := f.Tag.Get("perflib")
|
||||
if tag == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ctr, found := counters[tag]
|
||||
if !found {
|
||||
log.Debugf("missing counter %q, have %v", tag, counterMapKeys(counters))
|
||||
continue
|
||||
}
|
||||
if !target.Field(i).CanSet() {
|
||||
return fmt.Errorf("tagged field %v cannot be written to", f.Name)
|
||||
}
|
||||
if fieldType := target.Field(i).Type(); fieldType != reflect.TypeOf((*float64)(nil)).Elem() {
|
||||
return fmt.Errorf("tagged field %v has wrong type %v, must be float64", f.Name, fieldType)
|
||||
}
|
||||
|
||||
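// Scale raw values depending on the counter type: elapsed-time counters are
// rebased from the Windows epoch using the object frequency, and 100 ns timer
// counters are converted to seconds via ticksToSecondsScaleFactor.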
switch ctr.Def.CounterType {
|
||||
case perflibCollector.PERF_ELAPSED_TIME:
|
||||
target.Field(i).SetFloat(float64(ctr.Value-windowsEpoch) / float64(obj.Frequency))
|
||||
case perflibCollector.PERF_100NSEC_TIMER, perflibCollector.PERF_PRECISION_100NS_TIMER:
|
||||
target.Field(i).SetFloat(float64(ctr.Value) * ticksToSecondsScaleFactor)
|
||||
default:
|
||||
target.Field(i).SetFloat(float64(ctr.Value))
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Name != "" && target.FieldByName("Name").CanSet() {
|
||||
target.FieldByName("Name").SetString(instance.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func counterMapKeys(m map[string]*perflib.PerfCounter) []string {
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys
|
||||
}
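To illustrate how collectors converted in this change consume the snapshot, here is a minimal sketch; the `exampleSystem` struct and `collectExample` function are illustrative only and not part of the diff, while `ScrapeContext`, `perfObjects` and `unmarshalObject` are the helpers introduced above. Fields are tagged with the English counter names and filled from the pre-fetched perflib object.

```go
// Hypothetical example, mirroring the pattern used by the system collector in this change.
type exampleSystem struct {
	ProcessorQueueLength  float64 `perflib:"Processor Queue Length"`
	ContextSwitchesPersec float64 `perflib:"Context Switches/sec"`
}

func collectExample(ctx *ScrapeContext) ([]exampleSystem, error) {
	var dst []exampleSystem
	// "System" must be named when the collector registers, so the snapshot contains it.
	if err := unmarshalObject(ctx.perfObjects["System"], &dst); err != nil {
		return nil, err
	}
	return dst, nil
}
```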
|
||||
collector/perflib_test.go (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
perflibCollector "github.com/leoluk/perflib_exporter/collector"
|
||||
"github.com/leoluk/perflib_exporter/perflib"
|
||||
)
|
||||
|
||||
type simple struct {
|
||||
ValA float64 `perflib:"Something"`
|
||||
ValB float64 `perflib:"Something Else"`
|
||||
}
|
||||
|
||||
func TestUnmarshalPerflib(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
obj *perflib.PerfObject
|
||||
|
||||
expectedOutput []simple
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "nil check",
|
||||
obj: nil,
|
||||
expectedOutput: []simple{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "Simple",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 123,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 123}},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Multiple properties",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 123,
|
||||
},
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something Else",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 256,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 123, ValB: 256}},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Multiple instances",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 321,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 231,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 321}, {ValA: 231}},
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
output := make([]simple, 0)
|
||||
err := unmarshalObject(c.obj, &output)
|
||||
if err != nil && !c.expectError {
|
||||
t.Errorf("Did not expect error, got %q", err)
|
||||
}
|
||||
if err == nil && c.expectError {
|
||||
t.Errorf("Expected an error, but got ok")
|
||||
}
|
||||
|
||||
if err == nil && !reflect.DeepEqual(output, c.expectedOutput) {
|
||||
t.Errorf("Output mismatch, expected %+v, got %+v", c.expectedOutput, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["process"] = NewProcessCollector
|
||||
registerCollector("process", NewProcessCollector)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -135,7 +135,7 @@ func NewProcessCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ProcessCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *ProcessCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting process metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["service"] = NewserviceCollector
|
||||
registerCollector("service", NewserviceCollector)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -64,7 +64,7 @@ func NewserviceCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *serviceCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting service metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -3,14 +3,12 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["system"] = NewSystemCollector
|
||||
registerCollector("system", NewSystemCollector, "System")
|
||||
}
|
||||
|
||||
// A SystemCollector is a Prometheus collector for WMI metrics
|
||||
@@ -69,8 +67,8 @@ func NewSystemCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *SystemCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting system metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
@@ -79,57 +77,50 @@ func (c *SystemCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
|
||||
// Win32_PerfRawData_PerfOS_System docs:
|
||||
// - https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp
|
||||
type Win32_PerfRawData_PerfOS_System struct {
|
||||
ContextSwitchesPersec uint32
|
||||
ExceptionDispatchesPersec uint32
|
||||
Frequency_Object uint64
|
||||
ProcessorQueueLength uint32
|
||||
SystemCallsPersec uint32
|
||||
SystemUpTime uint64
|
||||
Threads uint32
|
||||
Timestamp_Object uint64
|
||||
type system struct {
|
||||
ContextSwitchesPersec float64 `perflib:"Context Switches/sec"`
|
||||
ExceptionDispatchesPersec float64 `perflib:"Exception Dispatches/sec"`
|
||||
ProcessorQueueLength float64 `perflib:"Processor Queue Length"`
|
||||
SystemCallsPersec float64 `perflib:"System Calls/sec"`
|
||||
SystemUpTime float64 `perflib:"System Up Time"`
|
||||
Threads float64 `perflib:"Threads"`
|
||||
}
|
||||
|
||||
func (c *SystemCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfOS_System
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
func (c *SystemCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []system
|
||||
if err := unmarshalObject(ctx.perfObjects["System"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContextSwitchesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ContextSwitchesPersec),
|
||||
dst[0].ContextSwitchesPersec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ExceptionDispatchesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ExceptionDispatchesPersec),
|
||||
dst[0].ExceptionDispatchesPersec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].ProcessorQueueLength),
|
||||
dst[0].ProcessorQueueLength,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCallsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SystemCallsPersec),
|
||||
dst[0].SystemCallsPersec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemUpTime,
|
||||
prometheus.GaugeValue,
|
||||
// convert from Windows timestamp (1 jan 1601) to unix timestamp (1 jan 1970)
|
||||
float64(dst[0].SystemUpTime-116444736000000000)/float64(dst[0].Frequency_Object),
|
||||
dst[0].SystemUpTime,
|
||||
)
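// Note: the perflib-based path passes SystemUpTime through unchanged, presumably
// because unmarshalObject (collector/perflib.go, added in this change) already
// applies the PERF_ELAPSED_TIME conversion from the Windows epoch.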
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Threads,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].Threads),
|
||||
dst[0].Threads,
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["tcp"] = NewTCPCollector
|
||||
registerCollector("tcp", NewTCPCollector)
|
||||
}
|
||||
|
||||
// A TCPCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_TCPv4 metrics
|
||||
@@ -90,7 +90,7 @@ func NewTCPCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *TCPCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *TCPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting tcp metrics:", desc, err)
|
||||
return err
|
||||
@@ -136,7 +136,7 @@ func (c *TCPCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, e
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionsEstablished,
|
||||
prometheus.CounterValue,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].ConnectionsEstablished),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
|
||||
@@ -54,7 +54,7 @@ type textFileCollector struct {
|
||||
}
|
||||
|
||||
func init() {
|
||||
Factories["textfile"] = NewTextFileCollector
|
||||
registerCollector("textfile", NewTextFileCollector)
|
||||
}
|
||||
|
||||
// NewTextFileCollector returns a new Collector exposing metrics read from files
|
||||
@@ -212,7 +212,7 @@ func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
// Update implements the Collector interface.
|
||||
func (c *textFileCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *textFileCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
error := 0.0
|
||||
mtimes := map[string]time.Time{}
|
||||
|
||||
|
||||
collector/thermalzone.go (new file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("thermalzone", NewThermalZoneCollector)
|
||||
}
|
||||
|
||||
// A thermalZoneCollector is a Prometheus collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics
|
||||
type thermalZoneCollector struct {
|
||||
PercentPassiveLimit *prometheus.Desc
|
||||
Temperature *prometheus.Desc
|
||||
ThrottleReasons *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewThermalZoneCollector constructs a new thermalZoneCollector exposing thermal zone metrics
|
||||
func NewThermalZoneCollector() (Collector, error) {
|
||||
const subsystem = "thermalzone"
|
||||
return &thermalZoneCollector{
|
||||
Temperature: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "temperature_celsius"),
|
||||
"(Temperature)",
|
||||
[]string{
|
||||
"name",
|
||||
},
|
||||
nil,
|
||||
),
|
||||
PercentPassiveLimit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "percent_passive_limit"),
|
||||
"(PercentPassiveLimit)",
|
||||
[]string{
|
||||
"name",
|
||||
},
|
||||
nil,
|
||||
),
|
||||
ThrottleReasons: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "throttle_reasons"),
|
||||
"(ThrottleReasons)",
|
||||
[]string{
|
||||
"name",
|
||||
},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *thermalZoneCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting thermalzone metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_Counters_ThermalZoneInformation docs:
|
||||
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_thermalzoneinformation/
|
||||
type Win32_PerfRawData_Counters_ThermalZoneInformation struct {
|
||||
Name string
|
||||
|
||||
HighPrecisionTemperature uint32
|
||||
PercentPassiveLimit uint32
|
||||
ThrottleReasons uint32
|
||||
}
|
||||
|
||||
func (c *thermalZoneCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_Counters_ThermalZoneInformation
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, info := range dst {
|
||||
// Divide by 10 and subtract 273.15 to convert decikelvin to Celsius
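// For example, a raw HighPrecisionTemperature reading of 3031 decikelvin becomes 3031/10.0 - 273.15 ≈ 29.95 °C.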
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Temperature,
|
||||
prometheus.GaugeValue,
|
||||
(float64(info.HighPrecisionTemperature)/10.0)-273.15,
|
||||
info.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PercentPassiveLimit,
|
||||
prometheus.GaugeValue,
|
||||
float64(info.PercentPassiveLimit),
|
||||
info.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ThrottleReasons,
|
||||
prometheus.GaugeValue,
|
||||
float64(info.ThrottleReasons),
|
||||
info.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["vmware"] = NewVmwareCollector
|
||||
registerCollector("vmware", NewVmwareCollector)
|
||||
}
|
||||
|
||||
// A VmwareCollector is a Prometheus collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics
|
||||
@@ -162,7 +162,7 @@ func NewVmwareCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *VmwareCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *VmwareCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectMem(ch); err != nil {
|
||||
log.Error("failed collecting vmware memory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -4,27 +4,9 @@ import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
// ...
|
||||
const (
|
||||
Namespace = "wmi"
|
||||
|
||||
// Conversion factors
|
||||
ticksToSecondsScaleFactor = 1 / 1e7
|
||||
)
|
||||
|
||||
// Factories ...
|
||||
var Factories = make(map[string]func() (Collector, error))
|
||||
|
||||
// Collector is the interface a collector has to implement.
|
||||
type Collector interface {
|
||||
// Get new metrics and expose them via prometheus registry.
|
||||
Collect(ch chan<- prometheus.Metric) (err error)
|
||||
}
|
||||
|
||||
func className(src interface{}) string {
|
||||
s := reflect.Indirect(reflect.ValueOf(src))
|
||||
t := s.Type()
|
||||
|
||||
@@ -3,12 +3,14 @@ This directory contains documentation of the collectors in the WMI exporter, wit
|
||||
|
||||
# Collectors
|
||||
- [`ad`](collector.ad.md)
|
||||
- [`adfs`](collector.adfs.md)
|
||||
- [`cpu`](collector.cpu.md)
|
||||
- [`cs`](collector.cs.md)
|
||||
- [`dns`](collector.dns.md)
|
||||
- [`hyperv`](collector.hyperv.md)
|
||||
- [`iis`](collector.iis.md)
|
||||
- [`logical_disk`](collector.logical_disk.md)
|
||||
- [`logon`](collector.logon.md)
|
||||
- [`memory`](collector.memory.md)
|
||||
- [`msmq`](collector.msmq.md)
|
||||
- [`mssql`](collector.mssql.md)
|
||||
@@ -27,4 +29,4 @@ This directory contains documentation of the collectors in the WMI exporter, wit
|
||||
- [`system`](collector.system.md)
|
||||
- [`tcp`](collector.tcp.md)
|
||||
- [`textfile`](collector.textfile.md)
|
||||
- [`vmware`](collector.vmware.md)
|
||||
- [`vmware`](collector.vmware.md)
|
||||
|
||||
docs/collector.adfs.md (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
# adfs collector
|
||||
|
||||
The adfs collector exposes metrics about Active Directory Federation Services. Note that this collector has only been tested against ADFS 4.0 (2016).
|
||||
Other ADFS versions may work but are not tested.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `adfs`
|
||||
Data source | Perflib
|
||||
Counters | `AD FS`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_adfs_ad_login_connection_failures` | Total number of connection failures between the ADFS server and the Active Directory domain controller(s) | counter | None
|
||||
`wmi_adfs_certificate_authentications` | Total number of [User Certificate](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-user-certificate-authentication) authentications, i.e. smart cards or mobile devices with provisioned client certificates | counter | None
|
||||
`wmi_adfs_device_authentications` | Total number of [device authentications](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/device-authentication-controls-in-ad-fs) (SignedToken, clientTLS, PkeyAuth). Device authentication is only available on ADFS 2016 or later | counter | None
|
||||
`wmi_adfs_extranet_account_lockouts` | Total number of [extranet lockouts](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-ad-fs-extranet-smart-lockout-protection). Requires the Extranet Lockout feature to be enabled | counter | None
|
||||
`wmi_adfs_federated_authentications` | Total number of authentications from federated sources, e.g. Office 365 | counter | None
|
||||
`wmi_adfs_passport_authentications` | Total number of authentications from [Microsoft Passport](https://en.wikipedia.org/wiki/Microsoft_account) (now named Microsoft Account) | counter | None
|
||||
`wmi_adfs_password_change_failed` | Total number of failed password changes. The Password Change Portal must be enabled in the AD FS Management tool in order to allow user password changes | counter | None
|
||||
`wmi_adfs_password_change_succeeded` | Total number of succeeded password changes. The Password Change Portal must be enabled in the AD FS Management tool in order to allow user password changes | counter | None
|
||||
`wmi_adfs_token_requests` | Total number of requested access tokens | counter | None
|
||||
`wmi_adfs_windows_integrated_authentications` | Total number of Windows integrated authentications using Kerberos or NTLM | counter | None
|
||||
|
||||
### Example metric
|
||||
Show rate of device authentications in AD FS:
|
||||
```
|
||||
rate(wmi_adfs_device_authentications[2m])
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
|
||||
## Alerting examples
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
- alert: "HighExtranetLockouts"
|
||||
expr: "rate(wmi_adfs_extranet_account_lockouts)[2m] > 100"
|
||||
for: "10m"
|
||||
labels:
|
||||
severity: "high"
|
||||
annotations:
|
||||
summary: "High number of AD FS extranet lockouts"
|
||||
description: "High number of AD FS extranet lockouts may indicate a password spray attack.\n Server: {{ $labels.instance }}\n Number of lockouts: {{ $value }}"
|
||||
```
|
||||
docs/collector.container.md (new file, 42 lines)
@@ -0,0 +1,42 @@
|
||||
# container collector
|
||||
|
||||
The container collector exposes metrics about containers running on the system
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `container`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_container_available` | Available | counter | `container_id`
|
||||
`wmi_container_count` | Number of containers | gauge | `container_id`
|
||||
`wmi_container_cpu_usage_seconds_kernelmode` | Run time in Kernel mode in Seconds | counter | `container_id`
|
||||
`wmi_container_cpu_usage_seconds_usermode` | Run Time in User mode in Seconds | counter | `container_id`
|
||||
`wmi_container_cpu_usage_seconds_total` | Total Run time in Seconds | counter | `container_id`
|
||||
`wmi_container_memory_usage_commit_bytes` | Memory Usage Commit Bytes | gauge | `container_id`
|
||||
`wmi_container_memory_usage_commit_peak_bytes` | Memory Usage Commit Peak Bytes | gauge | `container_id`
|
||||
`wmi_container_memory_usage_private_working_set_bytes` | Memory Usage Private Working Set Bytes | gauge | `container_id`
|
||||
`wmi_container_network_receive_bytes_total` | Bytes Received on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_receive_packets_total` | Packets Received on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_receive_packets_dropped_total` | Dropped Incoming Packets on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_transmit_bytes_total` | Bytes Sent on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_transmit_packets_total` | Packets Sent on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_transmit_packets_dropped_total` | Dropped Outgoing Packets on Interface | counter | `container_id`, `interface`
|
||||
|
||||
### Example metric
|
||||
_wmi_container_network_receive_bytes_total{container_id="docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e",interface="822179E7-002C-4280-ABBA-28BCFE401826"} 9.3305343e+07_
|
||||
|
||||
This metric means that a total of _9.3305343e+07_ bytes were received on interface _822179E7-002C-4280-ABBA-28BCFE401826_ by container _docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
@@ -1,31 +1,60 @@
|
||||
# cpu collector
|
||||
|
||||
The cpu collector exposes metrics about CPU usage
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `cpu`
|
||||
Classes | [`Win32_PerfRawData_PerfOS_Processor`](https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_cpu_cstate_seconds_total` | Time spent in low-power idle states | counter | `core`, `state`
|
||||
`wmi_cpu_time_total` | Time that processor spent in different modes (idle, user, system, ...) | counter | `core`, `mode`
|
||||
`wmi_cpu_interrupts_total` | Total number of received and serviced hardware interrupts | counter | `core`
|
||||
`wmi_cpu_dpcs_total` | Total number of received and serviced deferred procedure calls (DPCs) | counter | `core`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
# cpu collector
|
||||
|
||||
The cpu collector exposes metrics about CPU usage
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `cpu`
|
||||
Data source | Perflib
|
||||
Counters | `ProcessorInformation` (Windows Server 2008R2 and later), `Processor` (older versions)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
These metrics are available on all versions of Windows:
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_cpu_cstate_seconds_total` | Time spent in low-power idle states | counter | `core`, `state`
|
||||
`wmi_cpu_time_total` | Time that processor spent in different modes (idle, user, system, ...) | counter | `core`, `mode`
|
||||
`wmi_cpu_interrupts_total` | Total number of received and serviced hardware interrupts | counter | `core`
|
||||
`wmi_cpu_dpcs_total` | Total number of received and serviced deferred procedure calls (DPCs) | counter | `core`
|
||||
|
||||
These metrics are only exposed on Windows Server 2008R2 and later:
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_cpu_clock_interrupts_total` | Total number of received and serviced clock tick interrupts | counter | `core`
`wmi_cpu_idle_break_events_total` | Total number of times the processor was woken from idle | counter | `core`
`wmi_cpu_parking_status` | Parking Status represents whether a processor is parked or not | gauge | `core`
`wmi_cpu_core_frequency_mhz` | Core frequency in megahertz | gauge | `core`
`wmi_cpu_processor_performance` | Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100% | gauge | `core`
|
||||
|
||||
### Example metric
|
||||
Show frequency of host CPU cores
|
||||
```
|
||||
wmi_cpu_core_frequency_mhz{instance="localhost"}
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
Show cpu usage by mode.
|
||||
```
|
||||
sum by (mode) (irate(wmi_cpu_time_total{instance="localhost"}[5m]))
|
||||
```
|
||||
|
||||
## Alerting examples
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
# Alert on hosts with more than 80% CPU usage over a 10 minute period
|
||||
- alert: CpuUsage
|
||||
expr: 100 - (avg by (instance) (irate(wmi_cpu_time_total{mode="idle"}[2m])) * 100) > 80
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "CPU Usage (instance {{ $labels.instance }})"
|
||||
description: "CPU Usage is more than 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
|
||||
```
|
||||
|
||||
@@ -18,6 +18,7 @@ Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_cs_logical_processors` | Number of installed logical processors | gauge | None
|
||||
`wmi_cs_physical_memory_bytes` | Total installed physical memory | gauge | None
|
||||
`wmi_cs_hostname` | Labeled system hostname information | gauge | `hostname`, `domain`, `fqdn`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# hyperv collector
|
||||
# hyperv collector
|
||||
|
||||
The hyperv collector exposes metrics about the Hyper-V hypervisor
|
||||
|
||||
@@ -16,81 +16,81 @@ None
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_hyper_health_critical` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_health_ok` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_vid_physical_pages_allocated` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyper_vid_preferred_numa_node_index` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyper_vid_remote_physical_pages` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyper_root_partition_address_spaces` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_attached_devices` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_deposited_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_device_dma_errors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_device_interrupt_errors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_device_interrupt_mappings` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_device_interrupt_throttle_events` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_preferred_numa_node_index` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_gpa_space_modifications` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_io_tlb_flush_cost` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_io_tlb_flush` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_recommended_virtual_tlb_size` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_physical_pages_allocated` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_1G_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_1G_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_2M_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_2M_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_4K_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_4K_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_virtual_tlb_flush_entires` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_virtual_tlb_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_hypervisor_virtual_processors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_hypervisor_logical_processors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_host_cpu_guest_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyper_host_cpu_hypervisor_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyper_host_cpu_remote_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyper_host_cpu_total_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyper_vm_cpu_guest_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyper_vm_cpu_hypervisor_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyper_vm_cpu_remote_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyper_vm_cpu_total_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyper_vswitch_broadcast_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_broadcast_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_bytes_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_bytes_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_bytes_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_directed_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_directed_packets_send_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_dropped_packets_incoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_dropped_packets_outcoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_extensions_dropped_packets_incoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_extensions_dropped_packets_outcoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_learned_mac_addresses_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_multicast_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_multicast_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_number_of_send_channel_moves_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_number_of_vmq_moves_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_packets_flooded_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_packets_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_purged_mac_addresses_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_ethernet_bytes_dropped` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_bytes_received` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_bytes_sent` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_frames_dropped` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_frames_received` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_frames_sent` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_vm_device_error_count` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_queue_length` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_bytes_read` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_operations_read` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_bytes_written` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_operations_written` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_interface_bytes_received` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_bytes_sent` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_packets_incoming_dropped` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_packets_outgoing_dropped` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_packets_received` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_packets_sent` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_health_critical` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_health_ok` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_vid_physical_pages_allocated` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyperv_vid_preferred_numa_node_index` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyperv_vid_remote_physical_pages` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyperv_root_partition_address_spaces` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_attached_devices` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_deposited_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_device_dma_errors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_device_interrupt_errors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_device_interrupt_mappings` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_device_interrupt_throttle_events` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_preferred_numa_node_index` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_gpa_space_modifications` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_io_tlb_flush_cost` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_io_tlb_flush` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_recommended_virtual_tlb_size` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_physical_pages_allocated` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_1G_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_1G_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_2M_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_2M_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_4K_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_4K_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_virtual_tlb_flush_entires` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_virtual_tlb_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_hypervisor_virtual_processors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_hypervisor_logical_processors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_host_cpu_guest_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyperv_host_cpu_hypervisor_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyperv_host_cpu_remote_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyperv_host_cpu_total_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyperv_vm_cpu_guest_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyperv_vm_cpu_hypervisor_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyperv_vm_cpu_remote_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyperv_vm_cpu_total_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyperv_vswitch_broadcast_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_broadcast_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_bytes_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_bytes_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_bytes_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_directed_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_directed_packets_send_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_dropped_packets_incoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_dropped_packets_outcoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_extensions_dropped_packets_incoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_extensions_dropped_packets_outcoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_learned_mac_addresses_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_multicast_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_multicast_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_number_of_send_channel_moves_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_number_of_vmq_moves_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_packets_flooded_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_packets_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_purged_mac_addresses_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_ethernet_bytes_dropped` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_bytes_received` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_bytes_sent` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_frames_dropped` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_frames_received` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_frames_sent` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_vm_device_error_count` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_queue_length` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_bytes_read` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_operations_read` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_bytes_written` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_operations_written` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_interface_bytes_received` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_bytes_sent` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_packets_incoming_dropped` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_packets_outgoing_dropped` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_packets_received` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_packets_sent` | _Not yet documented_ | counter | `vm_interface`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||

@@ -1,44 +1,76 @@

# logical_disk collector

The logical_disk collector exposes metrics about logical disks (in contrast to physical disks)

|||
-|-
Metric name prefix | `logical_disk`
Classes | [`Win32_PerfRawData_PerfDisk_LogicalDisk`](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71))
Enabled by default? | Yes

## Flags

### `--collector.logical_disk.volume-whitelist`

If given, a disk needs to match the whitelist regexp in order for the corresponding disk metrics to be reported

### `--collector.logical_disk.volume-blacklist`

If given, a disk needs to *not* match the blacklist regexp in order for the corresponding disk metrics to be reported

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`requests_queued` | _Not yet documented_ | gauge | `volume`
`read_bytes_total` | _Not yet documented_ | counter | `volume`
`reads_total` | _Not yet documented_ | counter | `volume`
`write_bytes_total` | _Not yet documented_ | counter | `volume`
`writes_total` | _Not yet documented_ | counter | `volume`
`read_seconds_total` | _Not yet documented_ | counter | `volume`
`write_seconds_total` | _Not yet documented_ | counter | `volume`
`free_bytes` | _Not yet documented_ | gauge | `volume`
`size_bytes` | _Not yet documented_ | gauge | `volume`
`idle_seconds_total` | _Not yet documented_ | counter | `volume`
`split_ios_total` | _Not yet documented_ | counter | `volume`

### Example metric

_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries

_This collector does not yet have any useful queries added, we would appreciate your help adding them!_

## Alerting examples

_This collector does not yet have alerting examples, we would appreciate your help adding them!_
# logical_disk collector

The logical_disk collector exposes metrics about logical disks (in contrast to physical disks)

|||
-|-
Metric name prefix | `logical_disk`
Data source | Perflib
Counters | `LogicalDisk` ([`Win32_PerfRawData_PerfDisk_LogicalDisk`](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71)))
Enabled by default? | Yes

## Flags

### `--collector.logical_disk.volume-whitelist`

If given, a disk needs to match the whitelist regexp in order for the corresponding disk metrics to be reported

### `--collector.logical_disk.volume-blacklist`

If given, a disk needs to *not* match the blacklist regexp in order for the corresponding disk metrics to be reported
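
For example, a hedged invocation sketch (the binary name follows the project releases; the regexp is an example only) that limits disk metrics to the C: and D: volumes:

```
.\wmi_exporter.exe --collector.logical_disk.volume-whitelist="C:|D:"
```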

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`requests_queued` | Number of requests outstanding on the disk at the time the performance data is collected | gauge | `volume`
`read_bytes_total` | Rate at which bytes are transferred from the disk during read operations | counter | `volume`
`reads_total` | Rate of read operations on the disk | counter | `volume`
`write_bytes_total` | Rate at which bytes are transferred to the disk during write operations | counter | `volume`
`writes_total` | Rate of write operations on the disk | counter | `volume`
`read_seconds_total` | Seconds the disk was busy servicing read requests | counter | `volume`
`write_seconds_total` | Seconds the disk was busy servicing write requests | counter | `volume`
`free_bytes` | Unused space of the disk in bytes | gauge | `volume`
`size_bytes` | Total size of the disk in bytes | gauge | `volume`
`idle_seconds_total` | Seconds the disk was idle (not servicing read/write requests) | counter | `volume`
`split_ios_total` | Number of I/Os to the disk split into multiple I/Os | counter | `volume`

### Example metric

Query the rate at which data is read from a disk

```
rate(wmi_logical_disk_read_bytes_total{instance="localhost", volume=~"C:"}[2m])
```

## Useful queries

Calculate rate of total IOPS for disk

```
rate(wmi_logical_disk_reads_total{instance="localhost", volume="C:"}[2m]) + rate(wmi_logical_disk_writes_total{instance="localhost", volume="C:"}[2m])
```
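
Average disk latency can be derived in a similar way. A hedged sketch using the `read_seconds_total` and `reads_total` counters from the table above (busy time divided by operations gives the average seconds per read over the range):

```
rate(wmi_logical_disk_read_seconds_total{instance="localhost", volume="C:"}[2m]) / rate(wmi_logical_disk_reads_total{instance="localhost", volume="C:"}[2m])
```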

## Alerting examples

**prometheus.rules**
```yaml
groups:
- name: Windows Disk Alerts
  rules:

  # Sends an alert when disk space usage is above 95%
  - alert: DiskSpaceUsage
    expr: 100.0 - 100 * (wmi_logical_disk_free_bytes / wmi_logical_disk_size_bytes) > 95
    for: 10m
    labels:
      severity: high
    annotations:
      summary: "Disk Space Usage (instance {{ $labels.instance }})"
      description: "Disk Space on Drive is used more than 95%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"

  # Alerts on disks with over 85% space usage predicted to fill within the next four days
  - alert: DiskFilling
    expr: 100 * (wmi_logical_disk_free_bytes / wmi_logical_disk_size_bytes) < 15 and predict_linear(wmi_logical_disk_free_bytes[6h], 4 * 24 * 3600) < 0
    for: 10m
    labels:
      severity: warning
    annotations:
      summary: "Disk full in four days (instance {{ $labels.instance }})"
      description: "{{ $labels.volume }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
```
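
To make use of rules like these, the file has to be referenced from the Prometheus server configuration. A minimal sketch, assuming the rules above are saved as `prometheus.rules` next to `prometheus.yml`:

```yaml
# prometheus.yml (fragment)
rule_files:
  - "prometheus.rules"
```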

34 docs/collector.logon.md Normal file

@@ -0,0 +1,34 @@

# logon collector

The logon collector exposes metrics detailing the active user logon sessions.

|||
-|-
Metric name prefix | `logon`
Classes | [`Win32_LogonSession`](https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-logonsession)
Enabled by default? | No

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_logon_logon_type` | Number of active user logon sessions | gauge | `status`

### Example metric

Query the total number of interactive logon sessions

```
wmi_logon_logon_type{status="interactive"}
```

## Useful queries

Query the total number of local and remote (i.e. Terminal Services) interactive sessions.

```
wmi_logon_logon_type{status=~"interactive|remoteinteractive"}
```
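
A related aggregation that may be handy (a sketch only; it groups the same gauge by its `status` label to give a session count per logon type across all scraped hosts):

```
sum by (status) (wmi_logon_logon_type)
```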

## Alerting examples

_This collector does not yet have alerting examples, we would appreciate your help adding them!_
@@ -2,7 +2,10 @@

The memory collector exposes metrics about system memory usage

|||
-|-
Metric name prefix | `memory`
Data source | Perflib
Classes | `Win32_PerfRawData_PerfOS_Memory`
Enabled by default? | Yes
@@ -17,25 +20,25 @@ Name | Description | Type | Labels
|
||||
`wmi_cs_logical_processors` | Number of installed logical processors | gauge | None
|
||||
`wmi_cs_physical_memory_bytes` | Total installed physical memory | gauge | None
|
||||
`wmi_memory_available_bytes` | The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to the standby (cached), free and zero page lists | gauge | None
|
||||
`wmi_memory_cache_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_cache_bytes_peak` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_cache_faults_total` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_commit_limit` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_committed_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_cache_bytes` | Number of bytes currently being used by the file system cache | gauge | None
|
||||
`wmi_memory_cache_bytes_peak` | Maximum number of CacheBytes after the system was last restarted | gauge | None
|
||||
`wmi_memory_cache_faults_total` | Number of faults which occur when a page sought in the file system cache is not found there and must be retrieved from elsewhere in memory (soft fault) or from disk (hard fault) | gauge | None
|
||||
`wmi_memory_commit_limit` | Amount of virtual memory, in bytes, that can be committed without having to extend the paging file(s) | gauge | None
|
||||
`wmi_memory_committed_bytes` | Amount of committed virtual memory, in bytes | gauge | None
|
||||
`wmi_memory_demand_zero_faults_total` | The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space | gauge | None
|
||||
`wmi_memory_free_and_zero_page_list_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_free_system_page_table_entries` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_free_system_page_table_entries` | Number of page table entries not being used by the system | gauge | None
|
||||
`wmi_memory_modified_page_list_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_page_faults_total` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_page_faults_total` | Overall rate at which faulted pages are handled by the processor | gauge | None
|
||||
`wmi_memory_swap_page_reads_total` | Number of disk page reads (a single read operation reading several pages is still only counted once) | gauge | None
|
||||
`wmi_memory_swap_pages_read_total` | Number of pages read across all page reads (i.e. counting all pages read even if they are read in a single operation) | gauge | None
|
||||
`wmi_memory_swap_pages_written_total` | Number of pages written across all page writes (i.e. counting all pages written even if they are written in a single operation) | gauge | None
|
||||
`wmi_memory_swap_page_operations_total` | Total number of swap page read and writes (PagesPersec) | gauge | None
|
||||
`wmi_memory_swap_page_writes_total` | Number of disk page writes (a single write operation writing several pages is still only counted once) | gauge | None
|
||||
`wmi_memory_pool_nonpaged_allocs_total` | The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory area for objects that cannot be written to disk, and must remain in physical memory as long as they are allocated | gauge | None
|
||||
`wmi_memory_pool_nonpaged_bytes_total` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_pool_paged_allocs_total` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_pool_paged_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_pool_nonpaged_bytes_total` | Number of bytes in the non-paged pool | gauge | None
|
||||
`wmi_memory_pool_paged_allocs_total` | Number of calls to allocate space in the paged pool, regardless of the amount of space allocated in each call | gauge | None
|
||||
`wmi_memory_pool_paged_bytes` | Number of bytes in the paged pool | gauge | None
|
||||
`wmi_memory_pool_paged_resident_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_standby_cache_core_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_standby_cache_normal_priority_bytes` | _Not yet documented_ | gauge | None
|
||||
|
||||
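
A common derived figure, sketched here from the installed-memory and available-memory gauges documented above (note that `wmi_memory_available_bytes` includes standby-cache pages, so this only approximates "used" memory as a percentage):

```
100 * (1 - wmi_memory_available_bytes / wmi_cs_physical_memory_bytes)
```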
@@ -5,14 +5,14 @@ The mssql collector exposes metrics about the MSSQL server
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `mssql`
|
||||
Classes | [`Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerLocks`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object)
|
||||
Classes | [`Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerLocks`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLErrors`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerTransactions`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collectors.mssql.classes-enabled`
|
||||
|
||||
Comma-separated list of MSSQL WMI classes to use. Supported values are `accessmethods`, `availreplica`, `bufman`, `databases`, `dbreplica`, `genstats`, `locks`, `memmgr` and `sqlstats`.
|
||||
Comma-separated list of MSSQL WMI classes to use. Supported values are `accessmethods`, `availreplica`, `bufman`, `databases`, `dbreplica`, `genstats`, `locks`, `memmgr`, `sqlstats`, `sqlerrors` and `transactions`.
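
For example, a hedged invocation sketch that enables the collector with only a subset of the classes listed above (the global `--collectors.enabled` flag is taken from the exporter's README and is an assumption here):

```
.\wmi_exporter.exe --collectors.enabled="mssql" --collectors.mssql.classes-enabled="databases,genstats,sqlstats"
```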
### `--collectors.mssql.class-print`
|
||||
|
||||
@@ -24,80 +24,80 @@ Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_mssql_collector_duration_seconds` | The time taken for each sub-collector to return | counter | `collector`, `instance`
|
||||
`wmi_mssql_collector_success` | 1 if sub-collector succeeded, 0 otherwise | counter | `collector`, `instance`
|
||||
`wmi_mssql_accessmethods_au_batch_cleanups` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_au_cleanups` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_by_reference_lob_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_by_reference_lob_uses` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_read_aheads` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_column_value_pulls` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_column_value_pushes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_deferred_dropped_aus` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_deferred_dropped_rowsets` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_dropped_rowset_cleanups` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_dropped_rowset_skips` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_extent_deallocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_extent_allocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_au_batch_cleanup_failures` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_leaf_page_cookie_failures` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_tree_page_cookie_failures` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_forwarded_records` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_free_space_page_fetches` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_free_space_scans` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_full_scans` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_index_searches` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_insysxact_waits` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_handle_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_handle_destroys` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_destroys` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_truncations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_mixed_page_allocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_compression_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_deallocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_allocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_compressions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_splits` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_probe_scans` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_range_scans` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_scan_point_revalidations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_ghost_record_skips` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_table_lock_escalations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_leaf_page_cookie_uses` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_tree_page_cookie_uses` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_workfile_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_worktables_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_worktables_from_cache_ratio` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_availreplica_received_from_replica_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sent_to_replica_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sent_to_transport_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_initiated_flow_controls` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_flow_control_wait_seconds` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_receives_from_replica` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_resent_messages` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sends_to_replica` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sends_to_transport` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_bufman_background_writer_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_buffer_cache_hit_ratio` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_checkpoint_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_database_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_allocated_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_free_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_au_batch_cleanups` | The total number of batches that were completed successfully by the background task that cleans up deferred dropped allocation units | counter | `instance`
|
||||
`wmi_mssql_accessmethods_au_cleanups` | The total number of allocation units that were successfully dropped by the background task that cleans up deferred dropped allocation units. Each allocation unit drop requires multiple batches | counter | `instance`
|
||||
`wmi_mssql_accessmethods_by_reference_lob_creates` | The total count of large object (lob) values that were passed by reference. By-reference lobs are used in certain bulk operations to avoid the cost of passing them by value | counter | `instance`
|
||||
`wmi_mssql_accessmethods_by_reference_lob_uses` | The total count of by-reference lob values that were used. By-reference lobs are used in certain bulk operations to avoid the cost of passing them by-value | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_read_aheads` | The total count of lob pages on which readahead was issued | counter | `instance`
|
||||
`wmi_mssql_accessmethods_column_value_pulls` | The total count of column values that were pulled in-row from off-row | counter | `instance`
|
||||
`wmi_mssql_accessmethods_column_value_pushes` | The total count of column values that were pushed from in-row to off-row | counter | `instance`
|
||||
`wmi_mssql_accessmethods_deferred_dropped_aus` | The total number of allocation units waiting to be dropped by the background task that cleans up deferred dropped allocation units | counter | `instance`
|
||||
`wmi_mssql_accessmethods_deferred_dropped_rowsets` | The number of rowsets created as a result of aborted online index build operations that are waiting to be dropped by the background task that cleans up deferred dropped rowsets | counter | `instance`
|
||||
`wmi_mssql_accessmethods_dropped_rowset_cleanups` | The number of rowsets per second created as a result of aborted online index build operations that were successfully dropped by the background task that cleans up deferred dropped rowsets | counter | `instance`
|
||||
`wmi_mssql_accessmethods_dropped_rowset_skips` | The number of rowsets per second created as a result of aborted online index build operations that were skipped by the background task that cleans up deferred dropped rowsets created | counter | `instance`
|
||||
`wmi_mssql_accessmethods_extent_deallocations` | Number of extents deallocated per second in all databases in this instance of SQL Server | counter | `instance`
|
||||
`wmi_mssql_accessmethods_extent_allocations` | Number of extents allocated per second in all databases in this instance of SQL Server | counter | `instance`
|
||||
`wmi_mssql_accessmethods_au_batch_cleanup_failures` | The number of batches per second that failed and required retry, by the background task that cleans up deferred dropped allocation units. Failure could be due to lack of memory or disk space, hardware failure and other reasons | counter | `instance`
|
||||
`wmi_mssql_accessmethods_leaf_page_cookie_failures` | The number of times that a leaf page cookie could not be used during an index search since changes happened on the leaf page. The cookie is used to speed up index search | counter | `instance`
|
||||
`wmi_mssql_accessmethods_tree_page_cookie_failures` | The number of times that a tree page cookie could not be used during an index search since changes happened on the parent pages of those tree pages. The cookie is used to speed up index search | counter | `instance`
|
||||
`wmi_mssql_accessmethods_forwarded_records` | Number of records per second fetched through forwarded record pointers | counter | `instance`
|
||||
`wmi_mssql_accessmethods_free_space_page_fetches` | Number of pages fetched per second by free space scans. These scans search for free space within pages already allocated to an allocation unit, to satisfy requests to insert or modify record fragments | counter | `instance`
|
||||
`wmi_mssql_accessmethods_free_space_scans` | Number of scans per second that were initiated to search for free space within pages already allocated to an allocation unit to insert or modify record fragment. Each scan may find multiple pages | counter | `instance`
|
||||
`wmi_mssql_accessmethods_full_scans` | Number of unrestricted full scans per second. These can be either base-table or full-index scans | counter | `instance`
|
||||
`wmi_mssql_accessmethods_index_searches` | Number of index searches per second. These are used to start a range scan, reposition a range scan, revalidate a scan point, fetch a single index record, and search down the index to locate where to insert a new row | counter | `instance`
|
||||
`wmi_mssql_accessmethods_insysxact_waits` | Number of times a reader needs to wait for a page because the InSysXact bit is set | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_handle_creates` | Count of temporary lobs created | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_handle_destroys` | Count of temporary lobs destroyed | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_creates` | Count of LOB Storage Service Providers (LobSSP) created. One worktable created per LobSSP | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_destroys` | Count of LobSSP destroyed | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_truncations` | Count of LobSSP truncated | counter | `instance`
|
||||
`wmi_mssql_accessmethods_mixed_page_allocations` | Number of pages allocated per second from mixed extents. These could be used for storing the IAM pages and the first eight pages that are allocated to an allocation unit | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_compression_attempts` | Number of pages evaluated for page-level compression. Includes pages that were not compressed because significant savings could not be achieved. Includes all objects in the instance of SQL Server | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_deallocations` | Number of pages deallocated per second in all databases in this instance of SQL Server. These include pages from mixed extents and uniform extents | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_allocations` | Number of pages allocated per second in all databases in this instance of SQL Server. These include pages allocations from both mixed extents and uniform extents | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_compressions` | Number of data pages that are compressed by using PAGE compression. Includes all objects in the instance of SQL Server | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_splits` | Number of page splits per second that occur as the result of overflowing index pages | counter | `instance`
|
||||
`wmi_mssql_accessmethods_probe_scans` | Number of probe scans per second that are used to find at most one single qualified row in an index or base table directly | counter | `instance`
|
||||
`wmi_mssql_accessmethods_range_scans` | Number of qualified range scans through indexes per second | counter | `instance`
|
||||
`wmi_mssql_accessmethods_scan_point_revalidations` | Number of times per second that the scan point had to be revalidated to continue the scan | counter | `instance`
|
||||
`wmi_mssql_accessmethods_ghost_record_skips` | Number of ghosted records per second skipped during scans | counter | `instance`
|
||||
`wmi_mssql_accessmethods_table_lock_escalations` | Number of times locks on a table were escalated to the TABLE or HoBT granularity | counter | `instance`
|
||||
`wmi_mssql_accessmethods_leaf_page_cookie_uses` | Number of times a leaf page cookie is used successfully during an index search since no change happened on the leaf page. The cookie is used to speed up index search | counter | `instance`
|
||||
`wmi_mssql_accessmethods_tree_page_cookie_uses` | Number of times a tree page cookie is used successfully during an index search since no change happened on the parent page of the tree page. The cookie is used to speed up index search | counter | `instance`
|
||||
`wmi_mssql_accessmethods_workfile_creates` | Number of work files created per second. For example, work files could be used to store temporary results for hash joins and hash aggregates | counter | `instance`
|
||||
`wmi_mssql_accessmethods_worktables_creates` | Number of work tables created per second. For example, work tables could be used to store temporary results for query spool, lob variables, XML variables, and cursors | counter | `instance`
|
||||
`wmi_mssql_accessmethods_worktables_from_cache_ratio` | Percentage of work tables created where the initial two pages of the work table were not allocated but were immediately available from the work table cache | counter | `instance`
|
||||
`wmi_mssql_availreplica_received_from_replica_bytes` | Number of bytes received from the availability replica per second. Pings and status updates will generate network traffic even on databases with no user updates | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sent_to_replica_bytes` | Number of bytes sent to the remote availability replica per second. On the primary replica this is the number of bytes sent to the secondary replica. On the secondary replica this is the number of bytes sent to the primary replica | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sent_to_transport_bytes` | Actual number of bytes sent per second over the network to the remote availability replica. On the primary replica this is the number of bytes sent to the secondary replica. On the secondary replica this is the number of bytes sent to the primary replica | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_initiated_flow_controls` | Number of times flow-control initiated in the last second. Flow Control Time (ms/sec) divided by Flow Control/sec is the average time per wait | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_flow_control_wait_seconds` | Time in milliseconds that log stream messages waited for send flow control, in the last second | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_receives_from_replica` | Number of Always On messages received from the replica per second | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_resent_messages` | Number of Always On messages resent in the last second | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sends_to_replica` | Number of Always On messages sent to this availability replica per second | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sends_to_transport` | Actual number of Always On messages sent per second over the network to the remote availability replica | counter | `instance`, `replica`
|
||||
`wmi_mssql_bufman_background_writer_pages` | Number of pages flushed to enforce the recovery interval settings | counter | `instance`
|
||||
`wmi_mssql_bufman_buffer_cache_hit_ratio` | Indicates the percentage of pages found in the buffer cache without having to read from disk. The ratio is the total number of cache hits divided by the total number of cache lookups over the last few thousand page accesses | counter | `instance`
|
||||
`wmi_mssql_bufman_checkpoint_pages` | Indicates the number of pages flushed to disk per second by a checkpoint or other operation that require all dirty pages to be flushed | counter | `instance`
|
||||
`wmi_mssql_bufman_database_pages` | Indicates the number of pages in the buffer pool with database content | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_allocated_pages` | Total number of non-free cache pages in the buffer pool extension file | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_free_pages` | Total number of free cache pages in the buffer pool extension file | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_in_use_as_percentage` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_outstanding_io` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_evictions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_reads` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_unreferenced_seconds` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_writes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_free_list_stalls` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_integral_controller_slope` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_lazywrites` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_page_life_expectancy_seconds` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_page_lookups` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_page_reads` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_page_writes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_read_ahead_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_read_ahead_issuing_seconds` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_target_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_outstanding_io` | Percentage of the buffer pool extension paging file occupied by buffer manager pages | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_evictions` | Number of pages evicted from the buffer pool extension file per second | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_reads` | Number of pages read from the buffer pool extension file per second | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_unreferenced_seconds` | Average seconds a page will stay in the buffer pool extension without references to it | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_writes` | Number of pages written to the buffer pool extension file per second | counter | `instance`
|
||||
`wmi_mssql_bufman_free_list_stalls` | Indicates the number of requests per second that had to wait for a free page | counter | `instance`
|
||||
`wmi_mssql_bufman_integral_controller_slope` | The slope that integral controller for the buffer pool last used, times -10 billion | counter | `instance`
|
||||
`wmi_mssql_bufman_lazywrites` | Indicates the number of buffers written per second by the buffer manager's lazy writer | counter | `instance`
|
||||
`wmi_mssql_bufman_page_life_expectancy_seconds` | Indicates the number of seconds a page will stay in the buffer pool without references | counter | `instance`
|
||||
`wmi_mssql_bufman_page_lookups` | Indicates the number of requests per second to find a page in the buffer pool | counter | `instance`
|
||||
`wmi_mssql_bufman_page_reads` | Indicates the number of physical database page reads that are issued per second | counter | `instance`
|
||||
`wmi_mssql_bufman_page_writes` | Indicates the number of physical database page writes that are issued per second | counter | `instance`
|
||||
`wmi_mssql_bufman_read_ahead_pages` | Indicates the number of pages read per second in anticipation of use | counter | `instance`
|
||||
`wmi_mssql_bufman_read_ahead_issuing_seconds` | Time (microseconds) spent issuing readahead | counter | `instance`
|
||||
`wmi_mssql_bufman_target_pages` | Ideal number of pages in the buffer pool | counter | `instance`
|
||||
`wmi_mssql_dbreplica_database_flow_control_wait_seconds` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_database_initiated_flow_controls` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_received_file_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
@@ -112,130 +112,164 @@ Name | Description | Type | Labels
|
||||
`wmi_mssql_dbreplica_log_compression_cachemisses` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_compressions` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_decompressions` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_remaining_for_undo` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_send_queue` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_mirrored_write_transactions` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_recovery_queue_records` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redo_blocks` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redo_remaining_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redone_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_remaining_for_undo` | The amount of log, in bytes, remaining to complete the undo phase | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_send_queue` | Amount of log records in the log files of the primary database, in kilobytes, that haven't been sent to the secondary replica | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_mirrored_write_transactions` | Number of transactions that were written to the primary database and then waited to commit until the log was sent to the secondary database, in the last second | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_recovery_queue_records` | Amount of log records in the log files of the secondary replica that have not been redone | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redo_blocks` | Number of times the redo thread was blocked on locks held by readers of the database | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redo_remaining_bytes` | The amount of log, in kilobytes, remaining to be redone to finish the reverting phase | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redone_bytes` | Amount of log records redone on the secondary database in the last second | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redones` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_total_log_requiring_undo` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_transaction_delay_seconds` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_databases_active_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_backup_restore_operations` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_bulk_copy_rows` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_bulk_copy_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_commit_table_entries` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_data_files_size_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_dbcc_logical_scan_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_group_commit_stall_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flushed_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_cache_hit_ratio` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_cache_reads` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_files_size_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_files_used_size_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_waits` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_wait_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_write_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_growths` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_cache_misses` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_disk_reads` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_hash_deletes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_hash_inserts` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_invalid_hash_entries` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_log_scan_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_log_writer_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_empty_free_pool_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_low_memory_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_no_free_buffer_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_req_behind_trunc` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_requests_old_vlf` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_requests` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_total_active_log_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_total_shared_pool_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_shrinks` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_truncations` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_used_percent` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_pending_repl_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_repl_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_shrink_data_movement_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_tracked_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_write_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_dlc_fetch_latency_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_dlc_peak_latency_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_log_processed_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_memory_used_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_genstats_active_temp_tables` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_connection_resets` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_event_notifications_delayed_drop` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_http_authenticated_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_logical_connections` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_logins` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_logouts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_mars_deadlocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_non_atomic_yields` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_blocked_processes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_empty_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_method_invocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_session_initiate_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_session_terminate_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soapsql_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soapwsdl_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_sql_trace_io_provider_lock_waits` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_tempdb_recovery_unit_ids_generated` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_tempdb_rowset_ids_generated` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_temp_tables_creations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_temp_tables_awaiting_destruction` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_trace_event_notification_queue_size` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_transactions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_user_connections` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_locks_average_wait_seconds` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_requests` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_timeouts` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_timeouts_excluding_NOWAIT` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_waits` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_wait_seconds` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_deadlocks` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_memmgr_connection_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_database_cache_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_external_benefit_of_memory` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_free_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_granted_workspace_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_blocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_allocated_lock_blocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_owner_blocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_dbreplica_total_log_requiring_undo` | Total kilobytes of log that must be undone | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_transaction_delay_seconds` | Delay in waiting for unterminated commit acknowledgment for all the current transactions | counter | `instance`, `replica`
|
||||
`wmi_mssql_databases_active_transactions` | Number of active transactions for the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_backup_restore_operations` | Read/write throughput for backup and restore operations of a database per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_bulk_copy_rows` | Number of rows bulk copied per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_bulk_copy_bytes` | Amount of data bulk copied (in kilobytes) per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_commit_table_entries` | The size (row count) of the in-memory portion of the commit table for the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_data_files_size_bytes` | Cumulative size (in kilobytes) of all the data files in the database including any automatic growth. Monitoring this counter is useful, for example, for determining the correct size of tempdb | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_dbcc_logical_scan_bytes` | Number of logical read scan bytes per second for database console commands (DBCC) | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_group_commit_stall_seconds` | Group stall time (microseconds) per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flushed_bytes` | Total number of log bytes flushed | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_cache_hit_ratio` | Percentage of log cache reads satisfied from the log cache | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_cache_reads` | Reads performed per second through the log manager cache | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_files_size_bytes` | Cumulative size (in kilobytes) of all the transaction log files in the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_files_used_size_bytes` | The cumulative used size of all the log files in the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flushes` | Number of log flushes per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_waits` | Number of commits per second waiting for the log flush | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_wait_seconds` | Total wait time (in milliseconds) to flush the log. On an Always On secondary database, this value indicates the wait time for log records to be hardened to disk | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_write_seconds` | Time in milliseconds for performing writes of log flushes that were completed in the last second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_growths` | Total number of times the transaction log for the database has been expanded | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_cache_misses` | Number of requests for which the log block was not available in the log pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_disk_reads` | Number of disk reads that the log pool issued to fetch log blocks | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_hash_deletes` | Rate of raw hash entry deletes from the Log Pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_hash_inserts` | Rate of raw hash entry inserts into the Log Pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_invalid_hash_entries` | Rate of hash lookups failing due to being invalid | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_log_scan_pushes` | Rate of Log block pushes by log scans, which may come from disk or memory | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_log_writer_pushes` | Rate of Log block pushes by log writer thread | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_empty_free_pool_pushes` | Rate of Log block push fails due to empty free pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_low_memory_pushes` | Rate of Log block push fails due to being low on memory | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_no_free_buffer_pushes` | Rate of Log block push fails due to free buffer unavailable | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_req_behind_trunc` | Log pool cache misses due to block requested being behind truncation LSN | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_requests_old_vlf` | Log Pool requests that were not in the last VLF of the log | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_requests` | The number of log-block requests processed by the log pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_total_active_log_bytes` | Current total active log stored in the shared cache buffer manager in bytes | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_total_shared_pool_bytes` | Current total memory usage of the shared cache buffer manager in bytes | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_shrinks` | Total number of log shrinks for this database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_truncations` | The number of times the transaction log has been truncated (in Simple Recovery Model) | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_used_percent` | Percentage of space in the log that is in use | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_pending_repl_transactions` | Number of transactions in the transaction log of the publication database marked for replication, but not yet delivered to the distribution database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_repl_transactions` | Number of transactions per second read out of the transaction log of the publication database and delivered to the distribution database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_shrink_data_movement_bytes` | Amount of data being moved per second by autoshrink operations, or DBCC SHRINKDATABASE or DBCC SHRINKFILE statements | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_tracked_transactions` | Number of committed transactions recorded in the commit table for the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_transactions` | Number of transactions started for the database per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_write_transactions` | Number of transactions that wrote to the database and committed, in the last second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_dlc_fetch_latency_seconds` | Average latency in microseconds between log blocks entering the Direct Log Consumer and being retrieved by the XTP controller, per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_dlc_peak_latency_seconds` | The largest recorded latency, in microseconds, of a fetch from the Direct Log Consumer by the XTP controller | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_log_processed_bytes` | The amount of log bytes processed by the XTP controller thread, per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_memory_used_bytes` | The amount of memory used by XTP in the database | counter | `instance`, `database`
|
||||
`wmi_mssql_genstats_active_temp_tables` | Number of temporary tables/table variables in use | counter | `instance`
|
||||
`wmi_mssql_genstats_connection_resets` | Total number of logins started from the connection pool | counter | `instance`
|
||||
`wmi_mssql_genstats_event_notifications_delayed_drop` | Number of event notifications waiting to be dropped by a system thread | counter | `instance`
|
||||
`wmi_mssql_genstats_http_authenticated_requests` | Number of authenticated HTTP requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_logical_connections` | Number of logical connections to the system | counter | `instance`
|
||||
`wmi_mssql_genstats_logins` | Total number of logins started per second. This does not include pooled connections | counter | `instance`
|
||||
`wmi_mssql_genstats_logouts` | Total number of logout operations started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_mars_deadlocks` | Number of MARS deadlocks detected | counter | `instance`
|
||||
`wmi_mssql_genstats_non_atomic_yields` | Number of non-atomic yields per second | counter | `instance`
|
||||
`wmi_mssql_genstats_blocked_processes` | Number of currently blocked processes | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_empty_requests` | Number of empty SOAP requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_method_invocations` | Number of SOAP method invocations started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_session_initiate_requests` | Number of SOAP Session initiate requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_session_terminate_requests` | Number of SOAP Session terminate requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soapsql_requests` | Number of SOAP SQL requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soapwsdl_requests` | Number of SOAP Web Service Description Language requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_sql_trace_io_provider_lock_waits` | Number of waits for the File IO Provider lock per second | counter | `instance`
|
||||
`wmi_mssql_genstats_tempdb_recovery_unit_ids_generated` | Number of duplicate tempdb recovery unit id generated | counter | `instance`
|
||||
`wmi_mssql_genstats_tempdb_rowset_ids_generated` | Number of duplicate tempdb rowset id generated | counter | `instance`
|
||||
`wmi_mssql_genstats_temp_tables_creations` | Number of temporary tables/table variables created per second | counter | `instance`
|
||||
`wmi_mssql_genstats_temp_tables_awaiting_destruction` | Number of temporary tables/table variables waiting to be destroyed by the cleanup system thread | counter | `instance`
|
||||
`wmi_mssql_genstats_trace_event_notification_queue_size` | Number of trace event notification instances waiting in the internal queue to be sent through Service Broker | counter | `instance`
|
||||
`wmi_mssql_genstats_transactions` | Number of transaction enlistments (local, DTC, bound all combined) | counter | `instance`
|
||||
`wmi_mssql_genstats_user_connections` | Counts the number of users currently connected to SQL Server | counter | `instance`
|
||||
`wmi_mssql_locks_average_wait_seconds` | Average amount of wait time (in milliseconds) for each lock request that resulted in a wait | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_requests` | Number of new locks and lock conversions per second requested from the lock manager | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_timeouts` | Number of lock requests per second that timed out, including requests for NOWAIT locks | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_timeouts_excluding_NOWAIT` | Number of lock requests per second that timed out, excluding requests for NOWAIT locks | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_waits` | Number of lock requests per second that required the caller to wait | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_wait_seconds` | Total wait time (in milliseconds) for locks in the last second | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_deadlocks` | Number of lock requests per second that resulted in a deadlock | counter | `instance`, `resource`
|
||||
`wmi_mssql_memmgr_connection_memory_bytes` | Specifies the total amount of dynamic memory the server is using for maintaining connections | counter | `instance`
|
||||
`wmi_mssql_memmgr_database_cache_memory_bytes` | Specifies the amount of memory the server is currently using for the database pages cache | counter | `instance`
|
||||
`wmi_mssql_memmgr_external_benefit_of_memory` | An internal estimation of the performance benefit from adding memory to a specific cache | counter | `instance`
|
||||
`wmi_mssql_memmgr_free_memory_bytes` | Specifies the amount of committed memory currently not used by the server | counter | `instance`
|
||||
`wmi_mssql_memmgr_granted_workspace_memory_bytes` | Specifies the total amount of memory currently granted to executing processes, such as hash, sort, bulk copy, and index creation operations | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_blocks` | Specifies the current number of lock blocks in use on the server (refreshed periodically). A lock block represents an individual locked resource, such as a table, page, or row | counter | `instance`
|
||||
`wmi_mssql_memmgr_allocated_lock_blocks` | Specifies the current number of allocated lock blocks. At server startup, the number of allocated lock blocks plus the number of allocated lock owner blocks depends on the SQL Server Locks configuration option. If more lock blocks are needed, the value increases | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_memory_bytes` | Specifies the total amount of dynamic memory the server is using for locks | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_owner_blocks` | Specifies the current number of allocated lock owner blocks. At server startup, the number of allocated lock owner blocks and the number of allocated lock blocks depend on the SQL Server Locks configuration option. If more lock owner blocks are needed, the value increases dynamically | counter | `instance`
|
||||
`wmi_mssql_memmgr_allocated_lock_owner_blocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_log_pool_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_maximum_workspace_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_outstanding_memory_grants` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_pending_memory_grants` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_optimizer_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_reserved_server_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_sql_cache_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_stolen_server_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_target_server_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_total_server_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_log_pool_memory_bytes` | Total amount of dynamic memory the server is using for Log Pool | counter | `instance`
|
||||
`wmi_mssql_memmgr_maximum_workspace_memory_bytes` | Indicates the maximum amount of memory available for executing processes, such as hash, sort, bulk copy, and index creation operations | counter | `instance`
|
||||
`wmi_mssql_memmgr_outstanding_memory_grants` | Specifies the total number of processes that have successfully acquired a workspace memory grant | counter | `instance`
|
||||
`wmi_mssql_memmgr_pending_memory_grants` | Specifies the total number of processes waiting for a workspace memory grant | counter | `instance`
|
||||
`wmi_mssql_memmgr_optimizer_memory_bytes` | Specifies the total amount of dynamic memory the server is using for query optimization | counter | `instance`
|
||||
`wmi_mssql_memmgr_reserved_server_memory_bytes` | Indicates the amount of memory the server has reserved for future usage. This counter shows the current unused amount of memory initially granted that is shown in Granted Workspace Memory | counter | `instance`
|
||||
`wmi_mssql_memmgr_sql_cache_memory_bytes` | Specifies the total amount of dynamic memory the server is using for the dynamic SQL cache | counter | `instance`
|
||||
`wmi_mssql_memmgr_stolen_server_memory_bytes` | Specifies the amount of memory the server is using for purposes other than database pages | counter | `instance`
|
||||
`wmi_mssql_memmgr_target_server_memory_bytes` | Indicates the ideal amount of memory the server can consume | counter | `instance`
|
||||
`wmi_mssql_memmgr_total_server_memory_bytes` | Specifies the amount of memory the server has committed using the memory manager | counter | `instance`
|
||||
`wmi_mssql_sqlstats_auto_parameterization_attempts` | Number of auto-parameterization attempts per second. Note that auto-parameterizations are also known as simple parameterizations in later versions of SQL Server | counter | `instance`
|
||||
`wmi_mssql_sqlstats_batch_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_failed_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_forced_parameterizations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_guided_plan_executions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_misguided_plan_executions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_safe_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_attentions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_compilations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_recompilations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_unsafe_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_forced_parameterizations` | Number of successful forced parameterizations per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_guided_plan_executions` | Number of plan executions per second in which the query plan has been generated by using a plan guide | counter | `instance`
|
||||
`wmi_mssql_sqlstats_misguided_plan_executions` | Number of plan executions per second in which a plan guide could not be honored during plan generation | counter | `instance`
|
||||
`wmi_mssql_sqlstats_safe_auto_parameterization_attempts` | Number of safe auto-parameterization attempts per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_attentions` | Number of attentions per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_compilations` | Number of SQL compilations per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_recompilations` | Number of statement recompiles per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_unsafe_auto_parameterization_attempts` | Number of unsafe auto-parameterization attempts per second. | counter | `instance`
|
||||
`wmi_mssql_sql_errors_total` | Information for all errors | counter | `instance`, `resource`
|
||||
`wmi_mssql_transactions_tempdb_free_space_bytes` | The amount of space (in kilobytes) available in tempdb | gauge | `instance`
|
||||
`wmi_mssql_transactions_longest_transaction_running_seconds` | The length of time (in seconds) since the start of the transaction that has been active longer than any other current transaction | gauge | `instance`
|
||||
`wmi_mssql_transactions_nonsnapshot_version_active_total` | The number of currently active transactions that are not using snapshot isolation level and have made data modifications that have generated row versions in the tempdb version store | counter | `instance`
|
||||
`wmi_mssql_transactions_snapshot_active_total` | The number of currently active transactions using the snapshot isolation level | counter | `instance`
|
||||
`wmi_mssql_transactions_active_total` | The number of currently active transactions of all types | counter | `instance`
|
||||
`wmi_mssql_transactions_update_conflicts_total` | The percentage of those transactions using the snapshot isolation level that have encountered update conflicts within the last second | counter | `instance`
|
||||
`wmi_mssql_transactions_update_snapshot_active_total` | The number of currently active transactions using the snapshot isolation level and have modified data | counter | `instance`
|
||||
`wmi_mssql_transactions_version_cleanup_rate_bytes` | The rate (in kilobytes per second) at which row versions are removed from the snapshot isolation version store in tempdb | gauge | `instance`
|
||||
`wmi_mssql_transactions_version_generation_rate_bytes` | The rate (in kilobytes per second) at which new row versions are added to the snapshot isolation version store in tempdb | gauge | `instance`
|
||||
`wmi_mssql_transactions_version_store_size_bytes` | The amount of space (in kilobytes) in tempdb being used to store snapshot isolation level row versions | gauge | `instance`
|
||||
`wmi_mssql_transactions_version_store_units` | The number of active allocation units in the snapshot isolation version store in tempdb | counter | `instance`
|
||||
`wmi_mssql_transactions_version_store_creation_units` | The number of allocation units that have been created in the snapshot isolation store since the instance of the Database Engine was started | counter | `instance`
|
||||
`wmi_mssql_transactions_version_store_truncation_units` | The number of allocation units that have been removed from the snapshot isolation store since the instance of the Database Engine was started | counter | `instance`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
### Buffer Cache Hit Ratio
|
||||
|
||||
When you read the counter in perfmon you will get the percentage of pages found in the buffer cache. This percentage is calculated internally from the total number of cache hits divided by the total number of cache lookups over the last few thousand page accesses.
|
||||
This collector retrieves the two internal values separately, so the Buffer Cache Hit Ratio can be calculated in PromQL.
|
||||
|
||||
```
|
||||
wmi_mssql_bufman_buffer_cache_hits{instance="host:9182", exported_instance="MSSQLSERVER"} /
|
||||
wmi_mssql_bufman_buffer_cache_lookups{instance="host:9182", exported_instance="MSSQLSERVER"}
|
||||
```
|
||||
|
||||
This principle can be applied to the following metrics too (see the example after this list):
|
||||
- AccessMethodsWorktablesFromCacheHitRatio
|
||||
- accessmethods_worktables_from_cache_hits
|
||||
- accessmethods_worktables_from_cache_lookups
|
||||
- LogCacheHitRatio
|
||||
- databases_log_cache_hits
|
||||
- databases_log_cache_lookups
|
||||
- AverageLockWaitTime
|
||||
- locks_wait_time_seconds
|
||||
- locks_count
|
||||
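For example, assuming the same `instance` and `exported_instance` labels as in the query above, the log cache hit ratio can be derived from its two underlying counters in the same way:

```
wmi_mssql_databases_log_cache_hits{instance="host:9182", exported_instance="MSSQLSERVER"} /
wmi_mssql_databases_log_cache_lookups{instance="host:9182", exported_instance="MSSQLSERVER"}
```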
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
|
||||
@@ -5,6 +5,7 @@ The net collector exposes metrics about network interfaces
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `net`
|
||||
Data source | Perflib
|
||||
Classes | [`Win32_PerfRawData_Tcpip_NetworkInterface`](https://technet.microsoft.com/en-us/security/aa394340(v=vs.80))
|
||||
Enabled by default? | Yes
|
||||
|
||||
@@ -22,24 +23,40 @@ If given, an interface name needs to *not* match the blacklist regexp in order f
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_net_bytes_received_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_bytes_sent_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_bytes_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_outbound_discarded` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_outbound_errors` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_received_discarded` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_received_errors` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_received_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_received_unknown` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_sent_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_current_bandwidth` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_bytes_received_total` | Total bytes received by interface | counter | `nic`
|
||||
`wmi_net_bytes_sent_total` | Total bytes transmitted by interface | counter | `nic`
|
||||
`wmi_net_bytes_total` | Total bytes received and transmitted by interface | counter | `nic`
|
||||
`wmi_net_packets_outbound_discarded` | Total outbound packets that were chosen to be discarded even though no errors had been detected to prevent transmission | counter | `nic`
|
||||
`wmi_net_packets_outbound_errors` | Total packets that could not be transmitted due to errors | counter | `nic`
|
||||
`wmi_net_packets_received_discarded` | Total inbound packets that were chosen to be discarded even though no errors had been detected to prevent delivery | counter | `nic`
|
||||
`wmi_net_packets_received_errors` | Total packets that could not be received due to errors | counter | `nic`
|
||||
`wmi_net_packets_received_total` | Total packets received by interface | counter | `nic`
|
||||
`wmi_net_packets_received_unknown` | Total packets received by interface that were discarded because of an unknown or unsupported protocol | counter | `nic`
|
||||
`wmi_net_packets_total` | Total packets received and transmitted by interface | counter | `nic`
|
||||
`wmi_net_packets_sent_total` | Total packets transmitted by interface | counter | `nic`
|
||||
`wmi_net_current_bandwidth` | Estimate of the interface's current bandwidth in bits per second (bps) | gauge | `nic`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
Query the rate of transmitted network traffic
|
||||
```
|
||||
rate(wmi_net_bytes_sent_total{instance="localhost"}[2m])
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
Get total utilisation of network interface as a percentage
|
||||
```
|
||||
rate(wmi_net_bytes_total{instance="localhost", nic="Microsoft_Hyper_V_Network_Adapter__1"}[2m]) * 8 / wmi_net_current_bandwidth{instance="localhost", nic="Microsoft_Hyper_V_Network_Adapter__1"} * 100
|
||||
```
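A related sketch, assuming a 2-minute rate window suits your scrape interval, shows the per-second rate of inbound packet errors for each interface:

```
rate(wmi_net_packets_received_errors{instance="localhost"}[2m])
```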
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
- alert: NetInterfaceUsage
|
||||
expr: rate(wmi_net_bytes_total[2m]) * 8 / wmi_net_current_bandwidth * 100 > 95
|
||||
for: 10m
|
||||
labels:
|
||||
severity: high
|
||||
annotations:
|
||||
summary: "Network Interface Usage (instance {{ $labels.instance }})"
|
||||
description: "Network traffic usage is greater than 95% for interface {{ $labels.nic }}\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
|
||||
```
|
||||
|
||||
@@ -16,24 +16,51 @@ None
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_os_paging_limit_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_paging_free_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_physical_memory_free_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_time` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_timezone` | _Not yet documented_ | gauge | `timezone`
|
||||
`wmi_os_processes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_processes_limit` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_process_memory_limix_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_users` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_virtual_memory_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_visible_memory_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_virtual_memory_free_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_info` | Contains full product name & version in labels | gauge | `product`, `version`
|
||||
`wmi_os_paging_limit_bytes` | Total number of bytes that can be stored in the operating system paging files. 0 (zero) indicates that there are no paging files | gauge | None
|
||||
`wmi_os_paging_free_bytes` | Number of bytes that can be mapped into the operating system paging files without causing any other pages to be swapped out | gauge | None
|
||||
`wmi_os_physical_memory_free_bytes` | Bytes of physical memory currently unused and available | gauge | None
|
||||
`wmi_os_time` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None
|
||||
`wmi_os_timezone` | Current timezone as reported by the operating system. See [time.Zone()](https://golang.org/pkg/time/#Time.Zone) for details | gauge | `timezone`
|
||||
`wmi_os_processes` | Number of process contexts currently loaded or running on the operating system | gauge | None
|
||||
`wmi_os_processes_limit` | Maximum number of process contexts the operating system can support. The default value set by the provider is 4294967295 (0xFFFFFFFF) | gauge | None
|
||||
`wmi_os_process_memory_limit_bytes` | Maximum number of bytes of memory that can be allocated to a process | gauge | None
|
||||
`wmi_os_users` | Number of user sessions for which the operating system is currently storing state information. For a list of current active logon sessions, see [`logon`](collector.logon.md) | gauge | None
|
||||
`wmi_os_virtual_memory_bytes` | Bytes of virtual memory | gauge | None
|
||||
`wmi_os_visible_memory_bytes` | Total bytes of physical memory available to the operating system. This value does not necessarily indicate the true amount of physical memory, but what is reported to the operating system as available to it | gauge | None
|
||||
`wmi_os_virtual_memory_free_bytes` | Bytes of virtual memory currently unused and available | gauge | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
Show current number of processes
|
||||
```
|
||||
wmi_os_processes{instance="localhost"}
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
Find all devices not set to UTC timezone
|
||||
```
|
||||
wmi_os_timezone{timezone != "UTC"}
|
||||
```
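Because `wmi_os_time` is exported in Unix time, it can also be compared against each sample's own timestamp as a rough clock-drift check (the 30-second threshold is an arbitrary illustration; scrape delay contributes to the difference):

```
abs(wmi_os_time - timestamp(wmi_os_time)) > 30
```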
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
# Alert on hosts that have exhausted all available physical memory
|
||||
- alert: MemoryExhausted
|
||||
expr: wmi_os_physical_memory_free_bytes == 0
|
||||
for: 10m
|
||||
labels:
|
||||
severity: high
|
||||
annotations:
|
||||
summary: "Host {{ $labels.instance }} is out of memory"
|
||||
description: "{{ $labels.instance }} has exhausted all available physical memory"
|
||||
|
||||
# Alert on hosts with greater than 90% memory usage
|
||||
- alert: MemoryLow
|
||||
expr: 100 - 100 * wmi_os_physical_memory_free_bytes / wmi_cs_physical_memory_bytes > 90
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Memory usage for host {{ $labels.instance }} is greater than 90%"
|
||||
```
|
||||
|
||||
@@ -66,10 +66,42 @@ A service can have any of the following statuses:
|
||||
Note that there is some overlap with service state.
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
Lists the services that have a 'disabled' start mode.
|
||||
```
|
||||
wmi_service_start_mode{exported_name=~"(mssqlserver|sqlserveragent)",start_mode="disabled"}
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
Counts the number of Microsoft SQL Server/Agent Processes
|
||||
```
|
||||
count(wmi_service_state{exported_name=~"(sqlserveragent|mssqlserver)",state="running"})
|
||||
```
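Another possible query, assuming the lower-cased `auto` start mode (matching the `disabled` example above) and that start modes and states are exposed as 0/1 series as in the alerting examples below, lists services configured to start automatically that are not currently running:

```
wmi_service_start_mode{start_mode="auto"} == 1
  and on(instance, exported_name)
wmi_service_state{state="running"} == 0
```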
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
groups:
|
||||
- name: Microsoft SQL Server Alerts
|
||||
rules:
|
||||
|
||||
# Sends an alert when the 'sqlserveragent' service is not in the running state for 3 minutes.
|
||||
- alert: SQL Server Agent DOWN
|
||||
expr: wmi_service_state{instance="SQL",exported_name="sqlserveragent",state="running"} == 0
|
||||
for: 3m
|
||||
labels:
|
||||
severity: high
|
||||
annotations:
|
||||
summary: "Service {{ $labels.exported_name }} down"
|
||||
description: "Service {{ $labels.exported_name }} on instance {{ $labels.instance }} has been down for more than 3 minutes."
|
||||
|
||||
# Sends an alert when the 'mssqlserver' service is not in the running state for 3 minutes.
|
||||
- alert: SQL Server DOWN
|
||||
expr: wmi_service_state{instance="SQL",exported_name="mssqlserver",state="running"} == 0
|
||||
for: 3m
|
||||
labels:
|
||||
severity: high
|
||||
annotations:
|
||||
summary: "Service {{ $labels.exported_name }} down"
|
||||
description: "Service {{ $labels.exported_name }} on instance {{ $labels.instance }} has been down for more than 3 minutes."
|
||||
```
|
||||
In this example, `instance` is the target label of the host, so each alert is evaluated per host; the label is then used in the alert description.
|
||||
|
||||
@@ -5,6 +5,7 @@ The system collector exposes metrics about ...
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `system`
|
||||
Data source | Perflib
|
||||
Classes | [`Win32_PerfRawData_PerfOS_System`](https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp)
|
||||
Enabled by default? | Yes
|
||||
|
||||
@@ -16,18 +17,24 @@ None
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_system_context_switches_total` | _Not yet documented_ | counter | None
|
||||
`wmi_system_exception_dispatches_total` | _Not yet documented_ | counter | None
|
||||
`wmi_system_processor_queue_length` | _Not yet documented_ | gauge | None
|
||||
`wmi_system_system_calls_total` | _Not yet documented_ | counter | None
|
||||
`wmi_system_system_up_time` | _Not yet documented_ | gauge | None
|
||||
`wmi_system_threads` | _Not yet documented_ | gauge | None
|
||||
`wmi_system_context_switches_total` | Total number of [context switches](https://en.wikipedia.org/wiki/Context_switch) | counter | None
|
||||
`wmi_system_exception_dispatches_total` | Total exceptions dispatched by the system | counter | None
|
||||
`wmi_system_processor_queue_length` | Number of threads in the processor queue. There is a single queue for processor time even on computers with multiple processors. | gauge | None
|
||||
`wmi_system_system_calls_total` | Total combined calls to Windows NT system service routines by all processes running on the computer | counter | None
|
||||
`wmi_system_system_up_time` | Time of last boot of system | gauge | None
|
||||
`wmi_system_threads` | Number of Windows system [threads](https://en.wikipedia.org/wiki/Thread_(computing)) | gauge | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
Show current number of system threads
|
||||
```
|
||||
wmi_system_threads{instance="localhost"}
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
Find hosts that have rebooted in the last 24 hours
|
||||
```
|
||||
time() - wmi_system_system_up_time < 86400
|
||||
```
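Similarly, since `wmi_system_context_switches_total` is a counter, a sketch of the per-second context-switch rate (window chosen arbitrarily) looks like:

```
rate(wmi_system_context_switches_total{instance="localhost"}[5m])
```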
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
|
||||
@@ -16,15 +16,15 @@ None
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_tcp_connection_failures` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_connections_active` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_connections_established` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_connections_passive` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_connections_reset` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_segments_total` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_segments_received_total` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_segments_retransmitted_total` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_segments_sent_total` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_connection_failures` | Number of times TCP connections have made a direct transition to the CLOSED state from the SYN-SENT state or the SYN-RCVD state, plus the number of times TCP connections have made a direct transition from the SYN-RCVD state to the LISTEN state | counter | None
|
||||
`wmi_tcp_connections_active` | Number of times TCP connections have made a direct transition from the CLOSED state to the SYN-SENT state.| counter | None
|
||||
`wmi_tcp_connections_established` | Number of TCP connections for which the current state is either ESTABLISHED or CLOSE-WAIT. | counter | None
|
||||
`wmi_tcp_connections_passive` | Number of times TCP connections have made a direct transition from the LISTEN state to the SYN-RCVD state. | counter | None
|
||||
`wmi_tcp_connections_reset` | Number of times TCP connections have made a direct transition to the CLOSED state from either the ESTABLISHED state or the CLOSE-WAIT state. | counter | None
|
||||
`wmi_tcp_segments_total` | Total segments sent or received using the TCP protocol | counter | None
|
||||
`wmi_tcp_segments_received_total` | Total segments received, including those received in error. This count includes segments received on currently established connections | counter | None
|
||||
`wmi_tcp_segments_retransmitted_total` | Total segments retransmitted. That is, segments transmitted that contain one or more previously transmitted bytes | counter | None
|
||||
`wmi_tcp_segments_sent_total` | Total segments sent, including those on current connections, but excluding those containing *only* retransmitted bytes | counter | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
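As a sketch of what such an example could look like (the 5-minute window is an arbitrary choice), the share of sent segments that are retransmissions can be derived from two of the counters above:

```
rate(wmi_tcp_segments_retransmitted_total{instance="localhost"}[5m]) /
rate(wmi_tcp_segments_sent_total{instance="localhost"}[5m])
```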
|
||||
|
||||
@@ -12,7 +12,7 @@ Enabled by default? | Yes
|
||||
|
||||
### `--collector.textfile.directory`
|
||||
|
||||
The directory containing the files to be ingested. Only files with the extension `.prom` are read.
|
||||
The directory containing the files to be ingested. Only files with the extension `.prom` are read. Each `.prom` file must end with a trailing newline (an empty final line) to be parsed correctly.
|
||||
|
||||
Default value: `C:\Program Files\wmi_exporter\textfile_inputs`
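As a minimal sketch (the metric name and label below are purely illustrative), a `.prom` file uses the Prometheus text exposition format and must finish with a trailing newline:

```
# HELP example_static_metric A hand-maintained metric picked up by the textfile collector.
# TYPE example_static_metric gauge
example_static_metric{environment="staging"} 42
```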
|
||||
|
||||
|
||||
32
docs/collector.thermalzone.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# thermalzone collector
|
||||
|
||||
The thermalzone collector exposes metrics about system temperatures. Note that the underlying WMI class reports temperature in Kelvin; the exposed metric is converted to degrees Celsius.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `thermalzone`
|
||||
Classes | [`Win32_PerfRawData_Counters_ThermalZoneInformation`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_thermalzoneinformation/#temperature_properties)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_thermalzone_percent_passive_limit` | % Passive Limit is the current limit this thermal zone is placing on the devices it controls. A limit of 100% indicates the devices are unconstrained. A limit of 0% indicates the devices are fully constrained. | gauge | None
|
||||
`wmi_thermalzone_temperature_celsius` | Temperature of the thermal zone, in degrees Celsius. | gauge | None
|
||||
`wmi_thermalzone_throttle_reasons` | Throttle Reasons indicate reasons why the thermal zone is limiting performance of the devices it controls. 0x0 – The zone is not throttled. 0x1 – The zone is throttled for thermal reasons. 0x2 – The zone is throttled to limit electrical current. | gauge | None
|
||||
|
||||
[`Throttle reasons` source](https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/examples--requirements-and-diagnostics)
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
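As a sketch, the current temperature of each thermal zone on a given host (in degrees Celsius) can be shown with:

```
wmi_thermalzone_temperature_celsius{instance="localhost"}
```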
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
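One possibility, assuming the bitmask values described above, is to list zones that are currently throttled for any reason:

```
wmi_thermalzone_throttle_reasons != 0
```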
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
242
exporter.go
@@ -5,7 +5,9 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -23,7 +25,8 @@ import (
|
||||
|
||||
// WmiCollector implements the prometheus.Collector interface.
|
||||
type WmiCollector struct {
|
||||
collectors map[string]collector.Collector
|
||||
maxScrapeDuration time.Duration
|
||||
collectors map[string]collector.Collector
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -45,6 +48,18 @@ var (
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
)
|
||||
scrapeTimeoutDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(collector.Namespace, "exporter", "collector_timeout"),
|
||||
"wmi_exporter: Whether the collector timed out.",
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
)
|
||||
snapshotDuration = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(collector.Namespace, "exporter", "perflib_snapshot_duration_seconds"),
|
||||
"Duration of perflib snapshot capture",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
// This can be removed when client_golang exposes this on Windows
|
||||
// (See https://github.com/prometheus/client_golang/issues/376)
|
||||
@@ -64,63 +79,136 @@ func (coll WmiCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
ch <- scrapeSuccessDesc
|
||||
}
|
||||
|
||||
// Collect sends the collected metrics from each of the collectors to
|
||||
// prometheus. Collect could be called several times concurrently
|
||||
// and thus its run is protected by a single mutex.
|
||||
func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(coll.collectors))
|
||||
for name, c := range coll.collectors {
|
||||
go func(name string, c collector.Collector) {
|
||||
execute(name, c, ch)
|
||||
wg.Done()
|
||||
}(name, c)
|
||||
}
|
||||
type collectorOutcome int
|
||||
|
||||
const (
|
||||
pending collectorOutcome = iota
|
||||
success
|
||||
failed
|
||||
)
|
||||
|
||||
// Collect sends the collected metrics from each of the collectors to
|
||||
// prometheus.
|
||||
func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
startTimeDesc,
|
||||
prometheus.CounterValue,
|
||||
startTime,
|
||||
)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func filterAvailableCollectors(collectors string) string {
|
||||
var availableCollectors []string
|
||||
for _, c := range strings.Split(collectors, ",") {
|
||||
_, ok := collector.Factories[c]
|
||||
if ok {
|
||||
availableCollectors = append(availableCollectors, c)
|
||||
}
|
||||
t := time.Now()
|
||||
cs := make([]string, 0, len(coll.collectors))
|
||||
for name := range coll.collectors {
|
||||
cs = append(cs, name)
|
||||
}
|
||||
return strings.Join(availableCollectors, ",")
|
||||
}
|
||||
|
||||
func execute(name string, c collector.Collector, ch chan<- prometheus.Metric) {
|
||||
begin := time.Now()
|
||||
err := c.Collect(ch)
|
||||
duration := time.Since(begin)
|
||||
var success float64
|
||||
|
||||
scrapeContext, err := collector.PrepareScrapeContext(cs)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
snapshotDuration,
|
||||
prometheus.GaugeValue,
|
||||
time.Since(t).Seconds(),
|
||||
)
|
||||
if err != nil {
|
||||
log.Errorf("collector %s failed after %fs: %s", name, duration.Seconds(), err)
|
||||
success = 0
|
||||
} else {
|
||||
log.Debugf("collector %s succeeded after %fs.", name, duration.Seconds())
|
||||
success = 1
|
||||
ch <- prometheus.NewInvalidMetric(scrapeSuccessDesc, fmt.Errorf("failed to prepare scrape: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(coll.collectors))
|
||||
collectorOutcomes := make(map[string]collectorOutcome)
|
||||
for name := range coll.collectors {
|
||||
collectorOutcomes[name] = pending
|
||||
}
|
||||
|
||||
metricsBuffer := make(chan prometheus.Metric)
|
||||
l := sync.Mutex{}
|
||||
finished := false
|
||||
go func() {
|
||||
for m := range metricsBuffer {
|
||||
l.Lock()
|
||||
if !finished {
|
||||
ch <- m
|
||||
}
|
||||
l.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
for name, c := range coll.collectors {
|
||||
go func(name string, c collector.Collector) {
|
||||
defer wg.Done()
|
||||
outcome := execute(name, c, scrapeContext, metricsBuffer)
|
||||
l.Lock()
|
||||
if !finished {
|
||||
collectorOutcomes[name] = outcome
|
||||
}
|
||||
l.Unlock()
|
||||
}(name, c)
|
||||
}
|
||||
|
||||
allDone := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(allDone)
|
||||
close(metricsBuffer)
|
||||
}()
|
||||
|
||||
// Wait until either all collectors finish, or timeout expires
|
||||
select {
|
||||
case <-allDone:
|
||||
case <-time.After(coll.maxScrapeDuration):
|
||||
}
|
||||
|
||||
l.Lock()
|
||||
finished = true
|
||||
|
||||
remainingCollectorNames := make([]string, 0)
|
||||
for name, outcome := range collectorOutcomes {
|
||||
var successValue, timeoutValue float64
|
||||
if outcome == pending {
|
||||
timeoutValue = 1.0
|
||||
remainingCollectorNames = append(remainingCollectorNames, name)
|
||||
}
|
||||
if outcome == success {
|
||||
successValue = 1.0
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeSuccessDesc,
|
||||
prometheus.GaugeValue,
|
||||
successValue,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeTimeoutDesc,
|
||||
prometheus.GaugeValue,
|
||||
timeoutValue,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
if len(remainingCollectorNames) > 0 {
|
||||
log.Warn("Collection timed out, still waiting for ", remainingCollectorNames)
|
||||
}
|
||||
|
||||
l.Unlock()
|
||||
}
|
||||
|
||||
func execute(name string, c collector.Collector, ctx *collector.ScrapeContext, ch chan<- prometheus.Metric) collectorOutcome {
|
||||
t := time.Now()
|
||||
err := c.Collect(ctx, ch)
|
||||
duration := time.Since(t).Seconds()
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeDurationDesc,
|
||||
prometheus.GaugeValue,
|
||||
duration.Seconds(),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeSuccessDesc,
|
||||
prometheus.GaugeValue,
|
||||
success,
|
||||
duration,
|
||||
name,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("collector %s failed after %fs: %s", name, duration, err)
|
||||
return failed
|
||||
}
|
||||
log.Debugf("collector %s succeeded after %fs.", name, duration)
|
||||
return success
|
||||
}
|
||||
|
||||
func expandEnabledCollectors(enabled string) []string {
|
||||
@@ -144,21 +232,14 @@ func loadCollectors(list string) (map[string]collector.Collector, error) {
|
||||
enabled := expandEnabledCollectors(list)
|
||||
|
||||
for _, name := range enabled {
|
||||
fn, ok := collector.Factories[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("collector '%s' not available", name)
|
||||
}
|
||||
c, err := fn()
|
||||
c, err := collector.Build(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
collectors[name] = c
|
||||
}
|
||||
return collectors, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(version.NewCollector("wmi_exporter"))
|
||||
return collectors, nil
|
||||
}
|
||||
|
||||
func initWbem() {
|
||||
@@ -187,11 +268,15 @@ func main() {
|
||||
enabledCollectors = kingpin.Flag(
|
||||
"collectors.enabled",
|
||||
"Comma-separated list of collectors to use. Use '[defaults]' as a placeholder for all the collectors enabled by default.").
|
||||
Default(filterAvailableCollectors(defaultCollectors)).String()
|
||||
Default(defaultCollectors).String()
|
||||
printCollectors = kingpin.Flag(
|
||||
"collectors.print",
|
||||
"If true, print available collectors and exit.",
|
||||
).Bool()
|
||||
timeoutMargin = kingpin.Flag(
|
||||
"scrape.timeout-margin",
|
||||
"Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads.",
|
||||
).Default("0.5").Float64()
|
||||
)
|
||||
|
||||
log.AddFlags(kingpin.CommandLine)
|
||||
@@ -200,8 +285,9 @@ func main() {
|
||||
kingpin.Parse()
|
||||
|
||||
if *printCollectors {
|
||||
collectorNames := make(sort.StringSlice, 0, len(collector.Factories))
|
||||
for n := range collector.Factories {
|
||||
collectors := collector.Available()
|
||||
collectorNames := make(sort.StringSlice, 0, len(collectors))
|
||||
for _, n := range collectors {
|
||||
collectorNames = append(collectorNames, n)
|
||||
}
|
||||
collectorNames.Sort()
|
||||
@@ -236,10 +322,17 @@ func main() {
|
||||
|
||||
log.Infof("Enabled collectors: %v", strings.Join(keys(collectors), ", "))
|
||||
|
||||
nodeCollector := WmiCollector{collectors: collectors}
|
||||
prometheus.MustRegister(nodeCollector)
|
||||
h := &metricsHandler{
|
||||
timeoutMargin: *timeoutMargin,
|
||||
collectorFactory: func(timeout time.Duration) *WmiCollector {
|
||||
return &WmiCollector{
|
||||
collectors: collectors,
|
||||
maxScrapeDuration: timeout,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
http.Handle(*metricsPath, promhttp.Handler())
|
||||
http.Handle(*metricsPath, h)
|
||||
http.HandleFunc("/health", healthCheck)
|
||||
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, *metricsPath, http.StatusMovedPermanently)
|
||||
@@ -303,3 +396,36 @@ loop:
|
||||
changes <- svc.Status{State: svc.StopPending}
|
||||
return
|
||||
}
|
||||
|
||||
type metricsHandler struct {
|
||||
timeoutMargin float64
|
||||
collectorFactory func(timeout time.Duration) *WmiCollector
|
||||
}
|
||||
|
||||
func (mh *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
const defaultTimeout = 10.0
|
||||
|
||||
var timeoutSeconds float64
|
||||
if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
|
||||
var err error
|
||||
timeoutSeconds, err = strconv.ParseFloat(v, 64)
|
||||
if err != nil {
|
||||
log.Warnf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f", v, defaultTimeout)
|
||||
}
|
||||
}
|
||||
if timeoutSeconds == 0 {
|
||||
timeoutSeconds = defaultTimeout
|
||||
}
|
||||
timeoutSeconds = timeoutSeconds - mh.timeoutMargin
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(mh.collectorFactory(time.Duration(timeoutSeconds * float64(time.Second))))
|
||||
reg.MustRegister(
|
||||
prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
|
||||
prometheus.NewGoCollector(),
|
||||
version.NewCollector("wmi_exporter"),
|
||||
)
|
||||
|
||||
h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
|
||||
h.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
17
go.mod
Normal file
@@ -0,0 +1,17 @@
|
||||
module github.com/martinlindhe/wmi_exporter
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/Microsoft/go-winio v0.4.14 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.6
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6
|
||||
github.com/dimchansky/utfbom v1.1.0
|
||||
github.com/go-ole/go-ole v1.2.1 // indirect
|
||||
github.com/leoluk/perflib_exporter v0.1.0
|
||||
github.com/prometheus/client_golang v0.9.2
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
|
||||
github.com/prometheus/common v0.2.0
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6
|
||||
)
|
||||
89
go.sum
Normal file
@@ -0,0 +1,89 @@
|
||||
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
|
||||
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c=
|
||||
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
|
||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/protobuf v1.0.0 h1:lsek0oXi8iFE9L+EXARyHIjU5rlWIhhTkjDz3vHhWWQ=
|
||||
github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/leoluk/perflib_exporter v0.0.1 h1:MRwP1Ohh/mVevUy4ZUzvSlxnJtm9/NWHeM3aROxnRiQ=
|
||||
github.com/leoluk/perflib_exporter v0.0.1/go.mod h1:4APOQriqHobMzovXV7guPQv0ynKH6vZD3XNmT2MBc6w=
|
||||
github.com/leoluk/perflib_exporter v0.1.0 h1:fXe/mDaf9jR+Zk8FjFlcCSksACuIj2VNN4GyKHmQqtA=
|
||||
github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.0 h1:YNOwxxSJzSUARoD9KRZLzM9Y858MNGCOACTvCW9TSAc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20180312112859-e4aa40a9169a h1:JLXgXKi9RCmLk8DMn8+PCvN++iwpD3KptUbVvHBsKtU=
|
||||
github.com/prometheus/common v0.0.0-20180312112859-e4aa40a9169a/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20180310141954-54d17b57dd7d h1:iF+U2tTdys559fmqt0MNaC8QLIJh1twxIIOylDGhswM=
|
||||
github.com/prometheus/procfs v0.0.0-20180310141954-54d17b57dd7d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I=
|
||||
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
golang.org/x/crypto v0.0.0-20180312195533-182114d58262 h1:1NLVUmR8SQ7cNNA5Vo7ronpXbR+5A+9IwIC/bLE7D8Y=
|
||||
golang.org/x/crypto v0.0.0-20180312195533-182114d58262/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180313075820-8c0ece68c283 h1:DE/w7won1Ns86VoWjUZ4cJS6//TObJntGkxuZ63asRc=
|
||||
golang.org/x/sys v0.0.0-20180313075820-8c0ece68c283/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998=
|
||||
golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@@ -1,28 +0,0 @@
|
||||
{
|
||||
"Disable": [
|
||||
"goconst",
|
||||
"gocyclo",
|
||||
"gosec",
|
||||
"maligned",
|
||||
"megacheck"
|
||||
],
|
||||
"Enable": [
|
||||
"deadcode",
|
||||
"errcheck",
|
||||
"golint",
|
||||
"gotype",
|
||||
"gotypex",
|
||||
"ineffassign",
|
||||
"interfacer",
|
||||
"structcheck",
|
||||
"unconvert",
|
||||
"varcheck",
|
||||
"vet",
|
||||
"vetshadow"
|
||||
],
|
||||
"Exclude": [
|
||||
"don't use underscores in Go names",
|
||||
"exported type .+ should have comment or be unexported",
|
||||
"should be"
|
||||
]
|
||||
}
|
||||
@@ -29,7 +29,7 @@ func New{{ .CollectorName }}Collector() (Collector, error) {
|
||||
}
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *{{ .CollectorName }}Collector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *{{ .CollectorName }}Collector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting {{ .CollectorName | toLower }} metrics:", desc, err)
|
||||
return err
|
||||
|
||||
20
vendor/github.com/StackExchange/wmi/LICENSE
generated
vendored
@@ -1,20 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Stack Exchange
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
260 vendor/github.com/StackExchange/wmi/swbemservices.go (generated, vendored)
@@ -1,260 +0,0 @@
(entire vendored file deleted: swbemservices.go implemented SWbemServices, a reusable WMI query client that initializes COM once on a locked OS thread, serves WQL queries sent over an internal channel, and loads the results into caller-supplied struct slices via InitializeSWbemServices, Query and Close)
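The file summarized above exposed a small public API (InitializeSWbemServices, Query, Close) for reusing one WMI connection across many queries. A hedged, Windows-only usage sketch follows; the Win32_OperatingSystem struct and its Caption field are chosen purely for illustration.

package main

import (
	"log"

	"github.com/StackExchange/wmi"
)

// Win32_OperatingSystem is a minimal projection of the WMI class of the same name.
type Win32_OperatingSystem struct {
	Caption string
}

func main() {
	// Initialize COM once and keep the SWbemServices connection for the life of the process.
	svc, err := wmi.InitializeSWbemServices(wmi.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}
	defer svc.Close()

	var dst []Win32_OperatingSystem
	q := wmi.CreateQuery(&dst, "") // SELECT Caption FROM Win32_OperatingSystem
	if err := svc.Query(q, &dst); err != nil {
		log.Fatal(err)
	}
	for _, o := range dst {
		log.Println(o.Caption)
	}
}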
486 vendor/github.com/StackExchange/wmi/wmi.go (generated, vendored)
@@ -1,486 +0,0 @@
(entire vendored file deleted: wmi.go provided the package's WQL interface for WMI on Windows, including Query, QueryNamespace and CreateQuery, the Client type with its NonePtrZero, PtrNil, AllowMissingFields and SWbemServicesClient options, and the reflection-based loadEntity mapping of WMI properties onto Go struct fields)
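The package documentation in the deleted wmi.go carried a short example that printed the names of running processes; a self-contained, runnable version of that example (Windows only) is kept here for reference.

package main

import (
	"log"

	"github.com/StackExchange/wmi"
)

// Win32_Process mirrors the WMI class queried below; only Name is selected.
type Win32_Process struct {
	Name string
}

func main() {
	var dst []Win32_Process
	// CreateQuery builds "SELECT Name FROM Win32_Process" from the struct's fields.
	q := wmi.CreateQuery(&dst, "")
	if err := wmi.Query(q, &dst); err != nil {
		log.Fatal(err)
	}
	for i, v := range dst {
		log.Println(i, v.Name)
	}
}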
27 vendor/github.com/alecthomas/template/LICENSE (generated, vendored)
@@ -1,27 +0,0 @@
(entire vendored file deleted: the BSD-style Go Authors license text for github.com/alecthomas/template)
406 vendor/github.com/alecthomas/template/doc.go (generated, vendored)
@@ -1,406 +0,0 @@
(entire vendored file deleted: doc.go carried the package documentation for alecthomas/template, a fork of the standard text/template package, describing template actions, arguments, pipelines, variables, the predefined and comparison functions, and associated/nested template definitions)
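The deleted documentation opened with a worked example that renders "17 items are made of wool"; reconstructed as a runnable program against the vendored fork (whose interface mirrors text/template) it reads roughly as follows.

package main

import (
	"os"

	"github.com/alecthomas/template"
)

// Inventory is the data structure the template's dot is set to.
type Inventory struct {
	Material string
	Count    uint
}

func main() {
	sweaters := Inventory{"wool", 17}
	// Parse the action-bearing template, then execute it with sweaters as dot.
	tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}\n")
	if err != nil {
		panic(err)
	}
	if err := tmpl.Execute(os.Stdout, sweaters); err != nil {
		panic(err)
	}
}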
845 vendor/github.com/alecthomas/template/exec.go (generated, vendored)
@@ -1,845 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// state represents the state of an execution. It's not part of the
|
||||
// template so that multiple executions of the same template
|
||||
// can execute in parallel.
|
||||
type state struct {
|
||||
tmpl *Template
|
||||
wr io.Writer
|
||||
node parse.Node // current node, for errors
|
||||
vars []variable // push-down stack of variable values.
|
||||
}
|
||||
|
||||
// variable holds the dynamic value of a variable such as $, $x etc.
|
||||
type variable struct {
|
||||
name string
|
||||
value reflect.Value
|
||||
}
|
||||
|
||||
// push pushes a new variable on the stack.
|
||||
func (s *state) push(name string, value reflect.Value) {
|
||||
s.vars = append(s.vars, variable{name, value})
|
||||
}
|
||||
|
||||
// mark returns the length of the variable stack.
|
||||
func (s *state) mark() int {
|
||||
return len(s.vars)
|
||||
}
|
||||
|
||||
// pop pops the variable stack up to the mark.
|
||||
func (s *state) pop(mark int) {
|
||||
s.vars = s.vars[0:mark]
|
||||
}
|
||||
|
||||
// setVar overwrites the top-nth variable on the stack. Used by range iterations.
|
||||
func (s *state) setVar(n int, value reflect.Value) {
|
||||
s.vars[len(s.vars)-n].value = value
|
||||
}
|
||||
|
||||
// varValue returns the value of the named variable.
|
||||
func (s *state) varValue(name string) reflect.Value {
|
||||
for i := s.mark() - 1; i >= 0; i-- {
|
||||
if s.vars[i].name == name {
|
||||
return s.vars[i].value
|
||||
}
|
||||
}
|
||||
s.errorf("undefined variable: %s", name)
|
||||
return zero
|
||||
}
|
||||
|
||||
var zero reflect.Value
|
||||
|
||||
// at marks the state to be on node n, for error reporting.
|
||||
func (s *state) at(node parse.Node) {
|
||||
s.node = node
|
||||
}
|
||||
|
||||
// doublePercent returns the string with %'s replaced by %%, if necessary,
|
||||
// so it can be used safely inside a Printf format string.
|
||||
func doublePercent(str string) string {
|
||||
if strings.Contains(str, "%") {
|
||||
str = strings.Replace(str, "%", "%%", -1)
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (s *state) errorf(format string, args ...interface{}) {
|
||||
name := doublePercent(s.tmpl.Name())
|
||||
if s.node == nil {
|
||||
format = fmt.Sprintf("template: %s: %s", name, format)
|
||||
} else {
|
||||
location, context := s.tmpl.ErrorContext(s.node)
|
||||
format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
|
||||
}
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// errRecover is the handler that turns panics into returns from the top
|
||||
// level of Parse.
|
||||
func errRecover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
switch err := e.(type) {
|
||||
case runtime.Error:
|
||||
panic(e)
|
||||
case error:
|
||||
*errp = err
|
||||
default:
|
||||
panic(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ExecuteTemplate applies the template associated with t that has the given name
|
||||
// to the specified data object and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
|
||||
tmpl := t.tmpl[name]
|
||||
if tmpl == nil {
|
||||
return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
|
||||
}
|
||||
return tmpl.Execute(wr, data)
|
||||
}
|
||||
|
||||
// Execute applies a parsed template to the specified data object,
|
||||
// and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
|
||||
defer errRecover(&err)
|
||||
value := reflect.ValueOf(data)
|
||||
state := &state{
|
||||
tmpl: t,
|
||||
wr: wr,
|
||||
vars: []variable{{"$", value}},
|
||||
}
|
||||
t.init()
|
||||
if t.Tree == nil || t.Root == nil {
|
||||
var b bytes.Buffer
|
||||
for name, tmpl := range t.tmpl {
|
||||
if tmpl.Tree == nil || tmpl.Root == nil {
|
||||
continue
|
||||
}
|
||||
if b.Len() > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
fmt.Fprintf(&b, "%q", name)
|
||||
}
|
||||
var s string
|
||||
if b.Len() > 0 {
|
||||
s = "; defined templates are: " + b.String()
|
||||
}
|
||||
state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
|
||||
}
|
||||
state.walk(value, t.Root)
|
||||
return
|
||||
}
|
||||
|
||||
// Walk functions step through the major pieces of the template structure,
|
||||
// generating output as they go.
|
||||
func (s *state) walk(dot reflect.Value, node parse.Node) {
|
||||
s.at(node)
|
||||
switch node := node.(type) {
|
||||
case *parse.ActionNode:
|
||||
// Do not pop variables so they persist until next end.
|
||||
// Also, if the action declares variables, don't print the result.
|
||||
val := s.evalPipeline(dot, node.Pipe)
|
||||
if len(node.Pipe.Decl) == 0 {
|
||||
s.printValue(node, val)
|
||||
}
|
||||
case *parse.IfNode:
|
||||
s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
|
||||
case *parse.ListNode:
|
||||
for _, node := range node.Nodes {
|
||||
s.walk(dot, node)
|
||||
}
|
||||
case *parse.RangeNode:
|
||||
s.walkRange(dot, node)
|
||||
case *parse.TemplateNode:
|
||||
s.walkTemplate(dot, node)
|
||||
case *parse.TextNode:
|
||||
if _, err := s.wr.Write(node.Text); err != nil {
|
||||
s.errorf("%s", err)
|
||||
}
|
||||
case *parse.WithNode:
|
||||
s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
|
||||
default:
|
||||
s.errorf("unknown node: %s", node)
|
||||
}
|
||||
}
|
||||
|
||||
// walkIfOrWith walks an 'if' or 'with' node. The two control structures
|
||||
// are identical in behavior except that 'with' sets dot.
|
||||
func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
|
||||
defer s.pop(s.mark())
|
||||
val := s.evalPipeline(dot, pipe)
|
||||
truth, ok := isTrue(val)
|
||||
if !ok {
|
||||
s.errorf("if/with can't use %v", val)
|
||||
}
|
||||
if truth {
|
||||
if typ == parse.NodeWith {
|
||||
s.walk(val, list)
|
||||
} else {
|
||||
s.walk(dot, list)
|
||||
}
|
||||
} else if elseList != nil {
|
||||
s.walk(dot, elseList)
|
||||
}
|
||||
}
|
||||
|
||||
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
|
||||
// and whether the value has a meaningful truth value.
|
||||
func isTrue(val reflect.Value) (truth, ok bool) {
|
||||
if !val.IsValid() {
|
||||
// Something like var x interface{}, never set. It's a form of nil.
|
||||
return false, true
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
truth = val.Len() > 0
|
||||
case reflect.Bool:
|
||||
truth = val.Bool()
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
truth = val.Complex() != 0
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
|
||||
truth = !val.IsNil()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
truth = val.Int() != 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
truth = val.Float() != 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
truth = val.Uint() != 0
|
||||
case reflect.Struct:
|
||||
truth = true // Struct values are always true.
|
||||
default:
|
||||
return
|
||||
}
|
||||
return truth, true
|
||||
}
|
||||
|
||||
func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
	s.at(r)
	defer s.pop(s.mark())
	val, _ := indirect(s.evalPipeline(dot, r.Pipe))
	// mark top of stack before any variables in the body are pushed.
	mark := s.mark()
	oneIteration := func(index, elem reflect.Value) {
		// Set top var (lexically the second if there are two) to the element.
		if len(r.Pipe.Decl) > 0 {
			s.setVar(1, elem)
		}
		// Set next var (lexically the first if there are two) to the index.
		if len(r.Pipe.Decl) > 1 {
			s.setVar(2, index)
		}
		s.walk(elem, r.List)
		s.pop(mark)
	}
	switch val.Kind() {
	case reflect.Array, reflect.Slice:
		if val.Len() == 0 {
			break
		}
		for i := 0; i < val.Len(); i++ {
			oneIteration(reflect.ValueOf(i), val.Index(i))
		}
		return
	case reflect.Map:
		if val.Len() == 0 {
			break
		}
		for _, key := range sortKeys(val.MapKeys()) {
			oneIteration(key, val.MapIndex(key))
		}
		return
	case reflect.Chan:
		if val.IsNil() {
			break
		}
		i := 0
		for ; ; i++ {
			elem, ok := val.Recv()
			if !ok {
				break
			}
			oneIteration(reflect.ValueOf(i), elem)
		}
		if i == 0 {
			break
		}
		return
	case reflect.Invalid:
		break // An invalid value is likely a nil map, etc. and acts like an empty map.
	default:
		s.errorf("range can't iterate over %v", val)
	}
	if r.ElseList != nil {
		s.walk(dot, r.ElseList)
	}
}

func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
	s.at(t)
	tmpl := s.tmpl.tmpl[t.Name]
	if tmpl == nil {
		s.errorf("template %q not defined", t.Name)
	}
	// Variables declared by the pipeline persist.
	dot = s.evalPipeline(dot, t.Pipe)
	newState := *s
	newState.tmpl = tmpl
	// No dynamic scoping: template invocations inherit no variables.
	newState.vars = []variable{{"$", dot}}
	newState.walk(dot, tmpl.Root)
}

// Eval functions evaluate pipelines, commands, and their elements and extract
// values from the data structure by examining fields, calling methods, and so on.
// The printing of those values happens only through walk functions.

// evalPipeline returns the value acquired by evaluating a pipeline. If the
// pipeline has a variable declaration, the variable will be pushed on the
// stack. Callers should therefore pop the stack after they are finished
// executing commands depending on the pipeline value.
func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
	if pipe == nil {
		return
	}
	s.at(pipe)
	for _, cmd := range pipe.Cmds {
		value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
		// If the object has type interface{}, dig down one level to the thing inside.
		if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
			value = reflect.ValueOf(value.Interface()) // lovely!
		}
	}
	for _, variable := range pipe.Decl {
		s.push(variable.Ident[0], value)
	}
	return value
}

func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
	if len(args) > 1 || final.IsValid() {
		s.errorf("can't give argument to non-function %s", args[0])
	}
}

func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
|
||||
firstWord := cmd.Args[0]
|
||||
switch n := firstWord.(type) {
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, cmd.Args, final)
|
||||
case *parse.ChainNode:
|
||||
return s.evalChainNode(dot, n, cmd.Args, final)
|
||||
case *parse.IdentifierNode:
|
||||
// Must be a function.
|
||||
return s.evalFunction(dot, n, cmd, cmd.Args, final)
|
||||
case *parse.PipeNode:
|
||||
// Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
|
||||
return s.evalPipeline(dot, n)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, cmd.Args, final)
|
||||
}
|
||||
s.at(firstWord)
|
||||
s.notAFunction(cmd.Args, final)
|
||||
switch word := firstWord.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(word.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.NilNode:
|
||||
s.errorf("nil is not a command")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(word)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(word.Text)
|
||||
}
|
||||
s.errorf("can't evaluate command %q", firstWord)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// idealConstant is called to return the value of a number in a context where
|
||||
// we don't know the type. In that case, the syntax of the number tells us
|
||||
// its type, and we use Go rules to resolve. Note there is no such thing as
|
||||
// a uint ideal constant in this situation - the value must be of int type.
|
||||
func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
|
||||
// These are ideal constants but we don't know the type
|
||||
// and we have no context. (If it was a method argument,
|
||||
// we'd know what we need.) The syntax guides us to some extent.
|
||||
s.at(constant)
|
||||
switch {
|
||||
case constant.IsComplex:
|
||||
return reflect.ValueOf(constant.Complex128) // incontrovertible.
|
||||
case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
|
||||
return reflect.ValueOf(constant.Float64)
|
||||
case constant.IsInt:
|
||||
n := int(constant.Int64)
|
||||
if int64(n) != constant.Int64 {
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return reflect.ValueOf(n)
|
||||
case constant.IsUint:
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return zero
|
||||
}
|
||||
|
||||
func isHexConstant(s string) bool {
|
||||
return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
|
||||
}
|
||||
|
||||
func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(field)
|
||||
return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(chain)
|
||||
// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
|
||||
pipe := s.evalArg(dot, nil, chain.Node)
|
||||
if len(chain.Field) == 0 {
|
||||
s.errorf("internal error: no fields in evalChainNode")
|
||||
}
|
||||
return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
|
||||
s.at(variable)
|
||||
value := s.varValue(variable.Ident[0])
|
||||
if len(variable.Ident) == 1 {
|
||||
s.notAFunction(args, final)
|
||||
return value
|
||||
}
|
||||
return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
|
||||
}
|
||||
|
||||
// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
|
||||
// dot is the environment in which to evaluate arguments, while
|
||||
// receiver is the value being walked along the chain.
|
||||
func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
n := len(ident)
|
||||
for i := 0; i < n-1; i++ {
|
||||
receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
|
||||
}
|
||||
// Now if it's a method, it gets the arguments.
|
||||
return s.evalField(dot, ident[n-1], node, args, final, receiver)
|
||||
}
|
||||
|
||||
func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(node)
|
||||
name := node.Ident
|
||||
function, ok := findFunction(name, s.tmpl)
|
||||
if !ok {
|
||||
s.errorf("%q is not a defined function", name)
|
||||
}
|
||||
return s.evalCall(dot, function, cmd, name, args, final)
|
||||
}
|
||||
|
||||
// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
|
||||
// The 'final' argument represents the return value from the preceding
|
||||
// value of the pipeline, if any.
|
||||
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
|
||||
if !receiver.IsValid() {
|
||||
return zero
|
||||
}
|
||||
typ := receiver.Type()
|
||||
receiver, _ = indirect(receiver)
|
||||
// Unless it's an interface, need to get to a value of type *T to guarantee
|
||||
// we see all methods of T and *T.
|
||||
ptr := receiver
|
||||
if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
|
||||
ptr = ptr.Addr()
|
||||
}
|
||||
if method := ptr.MethodByName(fieldName); method.IsValid() {
|
||||
return s.evalCall(dot, method, node, fieldName, args, final)
|
||||
}
|
||||
hasArgs := len(args) > 1 || final.IsValid()
|
||||
// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
|
||||
receiver, isNil := indirect(receiver)
|
||||
if isNil {
|
||||
s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
|
||||
}
|
||||
switch receiver.Kind() {
|
||||
case reflect.Struct:
|
||||
tField, ok := receiver.Type().FieldByName(fieldName)
|
||||
if ok {
|
||||
field := receiver.FieldByIndex(tField.Index)
|
||||
if tField.PkgPath != "" { // field is unexported
|
||||
s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
|
||||
}
|
||||
// If it's a function, we must call it.
|
||||
if hasArgs {
|
||||
s.errorf("%s has arguments but cannot be invoked as function", fieldName)
|
||||
}
|
||||
return field
|
||||
}
|
||||
s.errorf("%s is not a field of struct type %s", fieldName, typ)
|
||||
case reflect.Map:
|
||||
// If it's a map, attempt to use the field name as a key.
|
||||
nameVal := reflect.ValueOf(fieldName)
|
||||
if nameVal.Type().AssignableTo(receiver.Type().Key()) {
|
||||
if hasArgs {
|
||||
s.errorf("%s is not a method but has arguments", fieldName)
|
||||
}
|
||||
return receiver.MapIndex(nameVal)
|
||||
}
|
||||
}
|
||||
s.errorf("can't evaluate field %s in type %s", fieldName, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
var (
|
||||
errorType = reflect.TypeOf((*error)(nil)).Elem()
|
||||
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
|
||||
)
|
||||
|
||||
// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
|
||||
// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
|
||||
// as the function itself.
|
||||
func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
if args != nil {
|
||||
args = args[1:] // Zeroth arg is function name/node; not passed to function.
|
||||
}
|
||||
typ := fun.Type()
|
||||
numIn := len(args)
|
||||
if final.IsValid() {
|
||||
numIn++
|
||||
}
|
||||
numFixed := len(args)
|
||||
if typ.IsVariadic() {
|
||||
numFixed = typ.NumIn() - 1 // last arg is the variadic one.
|
||||
if numIn < numFixed {
|
||||
s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
|
||||
}
|
||||
} else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
|
||||
s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
// TODO: This could still be a confusing error; maybe goodFunc should provide info.
|
||||
s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
|
||||
}
|
||||
// Build the arg list.
|
||||
argv := make([]reflect.Value, numIn)
|
||||
// Args must be evaluated. Fixed args first.
|
||||
i := 0
|
||||
for ; i < numFixed && i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, typ.In(i), args[i])
|
||||
}
|
||||
// Now the ... args.
|
||||
if typ.IsVariadic() {
|
||||
argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
|
||||
for ; i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, argType, args[i])
|
||||
}
|
||||
}
|
||||
// Add final value if necessary.
|
||||
if final.IsValid() {
|
||||
t := typ.In(typ.NumIn() - 1)
|
||||
if typ.IsVariadic() {
|
||||
t = t.Elem()
|
||||
}
|
||||
argv[i] = s.validateType(final, t)
|
||||
}
|
||||
result := fun.Call(argv)
|
||||
// If we have an error that is not nil, stop execution and return that error to the caller.
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
s.at(node)
|
||||
s.errorf("error calling %s: %s", name, result[1].Interface().(error))
|
||||
}
|
||||
return result[0]
|
||||
}
|
||||
|
||||
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
|
||||
func canBeNil(typ reflect.Type) bool {
|
||||
switch typ.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// validateType guarantees that the value is valid and assignable to the type.
|
||||
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
|
||||
if !value.IsValid() {
|
||||
if typ == nil || canBeNil(typ) {
|
||||
// An untyped nil interface{}. Accept as a proper nil value.
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("invalid value; expected %s", typ)
|
||||
}
|
||||
if typ != nil && !value.Type().AssignableTo(typ) {
|
||||
if value.Kind() == reflect.Interface && !value.IsNil() {
|
||||
value = value.Elem()
|
||||
if value.Type().AssignableTo(typ) {
|
||||
return value
|
||||
}
|
||||
// fallthrough
|
||||
}
|
||||
// Does one dereference or indirection work? We could do more, as we
|
||||
// do with method receivers, but that gets messy and method receivers
|
||||
// are much more constrained, so it makes more sense there than here.
|
||||
// Besides, one is almost always all you need.
|
||||
switch {
|
||||
case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
|
||||
value = value.Elem()
|
||||
if !value.IsValid() {
|
||||
s.errorf("dereference of nil pointer of type %s", typ)
|
||||
}
|
||||
case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
|
||||
value = value.Addr()
|
||||
default:
|
||||
s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
|
||||
}
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch arg := n.(type) {
|
||||
case *parse.DotNode:
|
||||
return s.validateType(dot, typ)
|
||||
case *parse.NilNode:
|
||||
if canBeNil(typ) {
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("cannot assign nil to %s", typ)
|
||||
case *parse.FieldNode:
|
||||
return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
|
||||
case *parse.VariableNode:
|
||||
return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
|
||||
case *parse.PipeNode:
|
||||
return s.validateType(s.evalPipeline(dot, arg), typ)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, arg, arg, nil, zero)
|
||||
case *parse.ChainNode:
|
||||
return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
|
||||
}
|
||||
switch typ.Kind() {
|
||||
case reflect.Bool:
|
||||
return s.evalBool(typ, n)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return s.evalComplex(typ, n)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return s.evalFloat(typ, n)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return s.evalInteger(typ, n)
|
||||
case reflect.Interface:
|
||||
if typ.NumMethod() == 0 {
|
||||
return s.evalEmptyInterface(dot, n)
|
||||
}
|
||||
case reflect.String:
|
||||
return s.evalString(typ, n)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return s.evalUnsignedInteger(typ, n)
|
||||
}
|
||||
s.errorf("can't handle %s for arg of type %s", n, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.BoolNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetBool(n.True)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected bool; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.StringNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetString(n.Text)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected string; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetInt(n.Int64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetUint(n.Uint64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected unsigned integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetFloat(n.Float64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected float; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetComplex(n.Complex128)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected complex; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch n := n.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(n.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, nil, zero)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, n, n, nil, zero)
|
||||
case *parse.NilNode:
|
||||
// NilNode is handled in evalArg, the only place that calls here.
|
||||
s.errorf("evalEmptyInterface: nil (can't happen)")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(n)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(n.Text)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, nil, zero)
|
||||
case *parse.PipeNode:
|
||||
return s.evalPipeline(dot, n)
|
||||
}
|
||||
s.errorf("can't handle assignment of %s to empty interface argument", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
|
||||
// We indirect through pointers and empty interfaces (only) because
|
||||
// non-empty interfaces have methods we might need.
|
||||
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
|
||||
for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
|
||||
if v.IsNil() {
|
||||
return v, true
|
||||
}
|
||||
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return v, false
|
||||
}
|
||||
|
||||
// printValue writes the textual representation of the value to the output of
|
||||
// the template.
|
||||
func (s *state) printValue(n parse.Node, v reflect.Value) {
|
||||
s.at(n)
|
||||
iface, ok := printableValue(v)
|
||||
if !ok {
|
||||
s.errorf("can't print %s of type %s", n, v.Type())
|
||||
}
|
||||
fmt.Fprint(s.wr, iface)
|
||||
}
|
||||
|
||||
// printableValue returns the, possibly indirected, interface value inside v that
|
||||
// is best for a call to formatted printer.
|
||||
func printableValue(v reflect.Value) (interface{}, bool) {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v, _ = indirect(v) // fmt.Fprint handles nil.
|
||||
}
|
||||
if !v.IsValid() {
|
||||
return "<no value>", true
|
||||
}
|
||||
|
||||
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
|
||||
if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
|
||||
v = v.Addr()
|
||||
} else {
|
||||
switch v.Kind() {
|
||||
case reflect.Chan, reflect.Func:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
return v.Interface(), true
|
||||
}
|
||||
|
||||
// Types to help sort the keys in a map for reproducible output.

type rvs []reflect.Value

func (x rvs) Len() int      { return len(x) }
func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

type rvInts struct{ rvs }

func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }

type rvUints struct{ rvs }

func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }

type rvFloats struct{ rvs }

func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }

type rvStrings struct{ rvs }

func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }

// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
func sortKeys(v []reflect.Value) []reflect.Value {
	if len(v) <= 1 {
		return v
	}
	switch v[0].Kind() {
	case reflect.Float32, reflect.Float64:
		sort.Sort(rvFloats{v})
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		sort.Sort(rvInts{v})
	case reflect.String:
		sort.Sort(rvStrings{v})
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		sort.Sort(rvUints{v})
	}
	return v
}
598
vendor/github.com/alecthomas/template/funcs.go
generated
vendored
@@ -1,598 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// FuncMap is the type of the map defining the mapping from names to functions.
|
||||
// Each function must have either a single return value, or two return values of
|
||||
// which the second has type error. In that case, if the second (error)
|
||||
// return value evaluates to non-nil during execution, execution terminates and
|
||||
// Execute returns that error.
|
||||
type FuncMap map[string]interface{}
|
||||
|
||||
var builtins = FuncMap{
|
||||
"and": and,
|
||||
"call": call,
|
||||
"html": HTMLEscaper,
|
||||
"index": index,
|
||||
"js": JSEscaper,
|
||||
"len": length,
|
||||
"not": not,
|
||||
"or": or,
|
||||
"print": fmt.Sprint,
|
||||
"printf": fmt.Sprintf,
|
||||
"println": fmt.Sprintln,
|
||||
"urlquery": URLQueryEscaper,
|
||||
|
||||
// Comparisons
|
||||
"eq": eq, // ==
|
||||
"ge": ge, // >=
|
||||
"gt": gt, // >
|
||||
"le": le, // <=
|
||||
"lt": lt, // <
|
||||
"ne": ne, // !=
|
||||
}
|
||||
|
||||
var builtinFuncs = createValueFuncs(builtins)
|
||||
|
||||
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
|
||||
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
|
||||
m := make(map[string]reflect.Value)
|
||||
addValueFuncs(m, funcMap)
|
||||
return m
|
||||
}
|
||||
|
||||
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
|
||||
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
v := reflect.ValueOf(fn)
|
||||
if v.Kind() != reflect.Func {
|
||||
panic("value for " + name + " not a function")
|
||||
}
|
||||
if !goodFunc(v.Type()) {
|
||||
panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
|
||||
}
|
||||
out[name] = v
|
||||
}
|
||||
}
|
||||
|
||||
// addFuncs adds to values the functions in funcs. It does no checking of the input -
|
||||
// call addValueFuncs first.
|
||||
func addFuncs(out, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
out[name] = fn
|
||||
}
|
||||
}
|
||||
|
||||
// goodFunc checks that the function or method has the right result signature.
|
||||
func goodFunc(typ reflect.Type) bool {
|
||||
// We allow functions with 1 result or 2 results where the second is an error.
|
||||
switch {
|
||||
case typ.NumOut() == 1:
|
||||
return true
|
||||
case typ.NumOut() == 2 && typ.Out(1) == errorType:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// findFunction looks for a function in the template, and global map.
|
||||
func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
|
||||
if tmpl != nil && tmpl.common != nil {
|
||||
if fn := tmpl.execFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
}
|
||||
if fn := builtinFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
|
||||
// Indexing.
|
||||
|
||||
// index returns the result of indexing its first argument by the following
|
||||
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
|
||||
// indexed item must be a map, slice, or array.
|
||||
func index(item interface{}, indices ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(item)
|
||||
for _, i := range indices {
|
||||
index := reflect.ValueOf(i)
|
||||
var isNil bool
|
||||
if v, isNil = indirect(v); isNil {
|
||||
return nil, fmt.Errorf("index of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.String:
|
||||
var x int64
|
||||
switch index.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
x = index.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
x = int64(index.Uint())
|
||||
default:
|
||||
return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
|
||||
}
|
||||
if x < 0 || x >= int64(v.Len()) {
|
||||
return nil, fmt.Errorf("index out of range: %d", x)
|
||||
}
|
||||
v = v.Index(int(x))
|
||||
case reflect.Map:
|
||||
if !index.IsValid() {
|
||||
index = reflect.Zero(v.Type().Key())
|
||||
}
|
||||
if !index.Type().AssignableTo(v.Type().Key()) {
|
||||
return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
|
||||
}
|
||||
if x := v.MapIndex(index); x.IsValid() {
|
||||
v = x
|
||||
} else {
|
||||
v = reflect.Zero(v.Type().Elem())
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("can't index item of type %s", v.Type())
|
||||
}
|
||||
}
|
||||
return v.Interface(), nil
|
||||
}
|
||||
|
||||
// Length
|
||||
|
||||
// length returns the length of the item, with an error if it has no defined length.
|
||||
func length(item interface{}) (int, error) {
|
||||
v, isNil := indirect(reflect.ValueOf(item))
|
||||
if isNil {
|
||||
return 0, fmt.Errorf("len of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len(), nil
|
||||
}
|
||||
return 0, fmt.Errorf("len of type %s", v.Type())
|
||||
}
|
||||
|
||||
// Function invocation
|
||||
|
||||
// call returns the result of evaluating the first argument as a function.
|
||||
// The function must return 1 result, or 2 results, the second of which is an error.
|
||||
func call(fn interface{}, args ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(fn)
|
||||
typ := v.Type()
|
||||
if typ.Kind() != reflect.Func {
|
||||
return nil, fmt.Errorf("non-function of type %s", typ)
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
|
||||
}
|
||||
numIn := typ.NumIn()
|
||||
var dddType reflect.Type
|
||||
if typ.IsVariadic() {
|
||||
if len(args) < numIn-1 {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
|
||||
}
|
||||
dddType = typ.In(numIn - 1).Elem()
|
||||
} else {
|
||||
if len(args) != numIn {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
|
||||
}
|
||||
}
|
||||
argv := make([]reflect.Value, len(args))
|
||||
for i, arg := range args {
|
||||
value := reflect.ValueOf(arg)
|
||||
// Compute the expected type. Clumsy because of variadics.
|
||||
var argType reflect.Type
|
||||
if !typ.IsVariadic() || i < numIn-1 {
|
||||
argType = typ.In(i)
|
||||
} else {
|
||||
argType = dddType
|
||||
}
|
||||
if !value.IsValid() && canBeNil(argType) {
|
||||
value = reflect.Zero(argType)
|
||||
}
|
||||
if !value.Type().AssignableTo(argType) {
|
||||
return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
|
||||
}
|
||||
argv[i] = value
|
||||
}
|
||||
result := v.Call(argv)
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
return result[0].Interface(), result[1].Interface().(error)
|
||||
}
|
||||
return result[0].Interface(), nil
|
||||
}
|
||||
|
||||
// Boolean logic.
|
||||
|
||||
func truth(a interface{}) bool {
|
||||
t, _ := isTrue(reflect.ValueOf(a))
|
||||
return t
|
||||
}
|
||||
|
||||
// and computes the Boolean AND of its arguments, returning
|
||||
// the first false argument it encounters, or the last argument.
|
||||
func and(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if !truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if !truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// or computes the Boolean OR of its arguments, returning
|
||||
// the first true argument it encounters, or the last argument.
|
||||
func or(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// not returns the Boolean negation of its argument.
|
||||
func not(arg interface{}) (truth bool) {
|
||||
truth, _ = isTrue(reflect.ValueOf(arg))
|
||||
return !truth
|
||||
}
|
||||
|
||||
// Comparison.
|
||||
|
||||
// TODO: Perhaps allow comparison between signed and unsigned integers.
|
||||
|
||||
var (
|
||||
errBadComparisonType = errors.New("invalid type for comparison")
|
||||
errBadComparison = errors.New("incompatible types for comparison")
|
||||
errNoComparison = errors.New("missing argument for comparison")
|
||||
)
|
||||
|
||||
type kind int
|
||||
|
||||
const (
|
||||
invalidKind kind = iota
|
||||
boolKind
|
||||
complexKind
|
||||
intKind
|
||||
floatKind
|
||||
integerKind
|
||||
stringKind
|
||||
uintKind
|
||||
)
|
||||
|
||||
func basicKind(v reflect.Value) (kind, error) {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return boolKind, nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return intKind, nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return uintKind, nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return floatKind, nil
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return complexKind, nil
|
||||
case reflect.String:
|
||||
return stringKind, nil
|
||||
}
|
||||
return invalidKind, errBadComparisonType
|
||||
}
|
||||
|
||||
// eq evaluates the comparison a == b || a == c || ...
|
||||
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(arg2) == 0 {
|
||||
return false, errNoComparison
|
||||
}
|
||||
for _, arg := range arg2 {
|
||||
v2 := reflect.ValueOf(arg)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind:
|
||||
truth = v1.Bool() == v2.Bool()
|
||||
case complexKind:
|
||||
truth = v1.Complex() == v2.Complex()
|
||||
case floatKind:
|
||||
truth = v1.Float() == v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() == v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() == v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() == v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
if truth {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ne evaluates the comparison a != b.
|
||||
func ne(arg1, arg2 interface{}) (bool, error) {
|
||||
// != is the inverse of ==.
|
||||
equal, err := eq(arg1, arg2)
|
||||
return !equal, err
|
||||
}
|
||||
|
||||
// lt evaluates the comparison a < b.
|
||||
func lt(arg1, arg2 interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
v2 := reflect.ValueOf(arg2)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind, complexKind:
|
||||
return false, errBadComparisonType
|
||||
case floatKind:
|
||||
truth = v1.Float() < v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() < v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() < v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() < v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
return truth, nil
|
||||
}
|
||||
|
||||
// le evaluates the comparison a <= b.
|
||||
func le(arg1, arg2 interface{}) (bool, error) {
|
||||
// <= is < or ==.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if lessThan || err != nil {
|
||||
return lessThan, err
|
||||
}
|
||||
return eq(arg1, arg2)
|
||||
}
|
||||
|
||||
// gt evaluates the comparison a > b.
|
||||
func gt(arg1, arg2 interface{}) (bool, error) {
|
||||
// > is the inverse of <=.
|
||||
lessOrEqual, err := le(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessOrEqual, nil
|
||||
}
|
||||
|
||||
// ge evaluates the comparison a >= b.
|
||||
func ge(arg1, arg2 interface{}) (bool, error) {
|
||||
// >= is the inverse of <.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessThan, nil
|
||||
}
|
||||
|
||||
// HTML escaping.
|
||||
|
||||
var (
	htmlQuot = []byte("&#34;") // shorter than "&quot;"
	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
	htmlAmp  = []byte("&amp;")
	htmlLt   = []byte("&lt;")
	htmlGt   = []byte("&gt;")
)
|
||||
|
||||
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
|
||||
func HTMLEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i, c := range b {
|
||||
var html []byte
|
||||
switch c {
|
||||
case '"':
|
||||
html = htmlQuot
|
||||
case '\'':
|
||||
html = htmlApos
|
||||
case '&':
|
||||
html = htmlAmp
|
||||
case '<':
|
||||
html = htmlLt
|
||||
case '>':
|
||||
html = htmlGt
|
||||
default:
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
w.Write(html)
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
|
||||
func HTMLEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexAny(s, `'"&<>`) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
HTMLEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// HTMLEscaper returns the escaped HTML equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func HTMLEscaper(args ...interface{}) string {
|
||||
return HTMLEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
// JavaScript escaping.
|
||||
|
||||
var (
|
||||
jsLowUni = []byte(`\u00`)
|
||||
hex = []byte("0123456789ABCDEF")
|
||||
|
||||
jsBackslash = []byte(`\\`)
|
||||
jsApos = []byte(`\'`)
|
||||
jsQuot = []byte(`\"`)
|
||||
jsLt = []byte(`\x3C`)
|
||||
jsGt = []byte(`\x3E`)
|
||||
)
|
||||
|
||||
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
|
||||
func JSEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i := 0; i < len(b); i++ {
|
||||
c := b[i]
|
||||
|
||||
if !jsIsSpecial(rune(c)) {
|
||||
// fast path: nothing to do
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
|
||||
if c < utf8.RuneSelf {
|
||||
// Quotes, slashes and angle brackets get quoted.
|
||||
// Control characters get written as \u00XX.
|
||||
switch c {
|
||||
case '\\':
|
||||
w.Write(jsBackslash)
|
||||
case '\'':
|
||||
w.Write(jsApos)
|
||||
case '"':
|
||||
w.Write(jsQuot)
|
||||
case '<':
|
||||
w.Write(jsLt)
|
||||
case '>':
|
||||
w.Write(jsGt)
|
||||
default:
|
||||
w.Write(jsLowUni)
|
||||
t, b := c>>4, c&0x0f
|
||||
w.Write(hex[t : t+1])
|
||||
w.Write(hex[b : b+1])
|
||||
}
|
||||
} else {
|
||||
// Unicode rune.
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if unicode.IsPrint(r) {
|
||||
w.Write(b[i : i+size])
|
||||
} else {
|
||||
fmt.Fprintf(w, "\\u%04X", r)
|
||||
}
|
||||
i += size - 1
|
||||
}
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
|
||||
func JSEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexFunc(s, jsIsSpecial) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
JSEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func jsIsSpecial(r rune) bool {
|
||||
switch r {
|
||||
case '\\', '\'', '"', '<', '>':
|
||||
return true
|
||||
}
|
||||
return r < ' ' || utf8.RuneSelf <= r
|
||||
}
|
||||
|
||||
// JSEscaper returns the escaped JavaScript equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func JSEscaper(args ...interface{}) string {
|
||||
return JSEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
// URLQueryEscaper returns the escaped value of the textual representation of
|
||||
// its arguments in a form suitable for embedding in a URL query.
|
||||
func URLQueryEscaper(args ...interface{}) string {
|
||||
return url.QueryEscape(evalArgs(args))
|
||||
}
|
||||
|
||||
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
|
||||
// fmt.Sprint(args...)
|
||||
// except that each argument is indirected (if a pointer), as required,
|
||||
// using the same rules as the default string evaluation during template
|
||||
// execution.
|
||||
func evalArgs(args []interface{}) string {
|
||||
ok := false
|
||||
var s string
|
||||
// Fast path for simple common case.
|
||||
if len(args) == 1 {
|
||||
s, ok = args[0].(string)
|
||||
}
|
||||
if !ok {
|
||||
for i, arg := range args {
|
||||
a, ok := printableValue(reflect.ValueOf(arg))
|
||||
if ok {
|
||||
args[i] = a
|
||||
} // else left fmt do its thing
|
||||
}
|
||||
s = fmt.Sprint(args...)
|
||||
}
|
||||
return s
|
||||
}
|
||||
108
vendor/github.com/alecthomas/template/helper.go
generated
vendored
@@ -1,108 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Helper functions to make constructing templates easier.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Functions and methods to parse templates.
|
||||
|
||||
// Must is a helper that wraps a call to a function returning (*Template, error)
|
||||
// and panics if the error is non-nil. It is intended for use in variable
|
||||
// initializations such as
|
||||
// var t = template.Must(template.New("name").Parse("text"))
|
||||
func Must(t *Template, err error) *Template {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ParseFiles creates a new Template and parses the template definitions from
|
||||
// the named files. The returned template's name will have the (base) name and
|
||||
// (parsed) contents of the first file. There must be at least one file.
|
||||
// If an error occurs, parsing stops and the returned *Template is nil.
|
||||
func ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(nil, filenames...)
|
||||
}
|
||||
|
||||
// ParseFiles parses the named files and associates the resulting templates with
|
||||
// t. If an error occurs, parsing stops and the returned template is nil;
|
||||
// otherwise it is t. There must be at least one file.
|
||||
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
|
||||
// parseFiles is the helper for the method and function. If the argument
|
||||
// template is nil, it is created from the first file.
|
||||
func parseFiles(t *Template, filenames ...string) (*Template, error) {
|
||||
if len(filenames) == 0 {
|
||||
// Not really a problem, but be consistent.
|
||||
return nil, fmt.Errorf("template: no files named in call to ParseFiles")
|
||||
}
|
||||
for _, filename := range filenames {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := string(b)
|
||||
name := filepath.Base(filename)
|
||||
// First template becomes return value if not already defined,
|
||||
// and we use that one for subsequent New calls to associate
|
||||
// all the templates together. Also, if this file has the same name
|
||||
// as t, this file becomes the contents of t, so
|
||||
// t, err := New(name).Funcs(xxx).ParseFiles(name)
|
||||
// works. Otherwise we create a new template associated with t.
|
||||
var tmpl *Template
|
||||
if t == nil {
|
||||
t = New(name)
|
||||
}
|
||||
if name == t.Name() {
|
||||
tmpl = t
|
||||
} else {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
_, err = tmpl.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// ParseGlob creates a new Template and parses the template definitions from the
|
||||
// files identified by the pattern, which must match at least one file. The
|
||||
// returned template will have the (base) name and (parsed) contents of the
|
||||
// first file matched by the pattern. ParseGlob is equivalent to calling
|
||||
// ParseFiles with the list of files matched by the pattern.
|
||||
func ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(nil, pattern)
|
||||
}
|
||||
|
||||
// ParseGlob parses the template definitions in the files identified by the
|
||||
// pattern and associates the resulting templates with t. The pattern is
|
||||
// processed by filepath.Glob and must match at least one file. ParseGlob is
|
||||
// equivalent to calling t.ParseFiles with the list of files matched by the
|
||||
// pattern.
|
||||
func (t *Template) ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(t, pattern)
|
||||
}
|
||||
|
||||
// parseGlob is the implementation of the function and method ParseGlob.
|
||||
func parseGlob(t *Template, pattern string) (*Template, error) {
|
||||
filenames, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(filenames) == 0 {
|
||||
return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
|
||||
}
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
556
vendor/github.com/alecthomas/template/parse/lex.go
generated
vendored
@@ -1,556 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// item represents a token or text string returned from the scanner.
|
||||
type item struct {
|
||||
typ itemType // The type of this item.
|
||||
pos Pos // The starting position, in bytes, of this item in the input string.
|
||||
val string // The value of this item.
|
||||
}
|
||||
|
||||
func (i item) String() string {
|
||||
switch {
|
||||
case i.typ == itemEOF:
|
||||
return "EOF"
|
||||
case i.typ == itemError:
|
||||
return i.val
|
||||
case i.typ > itemKeyword:
|
||||
return fmt.Sprintf("<%s>", i.val)
|
||||
case len(i.val) > 10:
|
||||
return fmt.Sprintf("%.10q...", i.val)
|
||||
}
|
||||
return fmt.Sprintf("%q", i.val)
|
||||
}
|
||||
|
||||
// itemType identifies the type of lex items.
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota // error occurred; value is text of error
|
||||
itemBool // boolean constant
|
||||
itemChar // printable ASCII character; grab bag for comma etc.
|
||||
itemCharConstant // character constant
|
||||
itemComplex // complex constant (1+2i); imaginary is just a number
|
||||
itemColonEquals // colon-equals (':=') introducing a declaration
|
||||
itemEOF
|
||||
itemField // alphanumeric identifier starting with '.'
|
||||
itemIdentifier // alphanumeric identifier not starting with '.'
|
||||
itemLeftDelim // left action delimiter
|
||||
itemLeftParen // '(' inside action
|
||||
itemNumber // simple number, including imaginary
|
||||
itemPipe // pipe symbol
|
||||
itemRawString // raw quoted string (includes quotes)
|
||||
itemRightDelim // right action delimiter
|
||||
itemElideNewline // elide newline after right delim
|
||||
itemRightParen // ')' inside action
|
||||
itemSpace // run of spaces separating arguments
|
||||
itemString // quoted string (includes quotes)
|
||||
itemText // plain text
|
||||
itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
|
||||
// Keywords appear after all the rest.
|
||||
itemKeyword // used only to delimit the keywords
|
||||
itemDot // the cursor, spelled '.'
|
||||
itemDefine // define keyword
|
||||
itemElse // else keyword
|
||||
itemEnd // end keyword
|
||||
itemIf // if keyword
|
||||
itemNil // the untyped nil constant, easiest to treat as a keyword
|
||||
itemRange // range keyword
|
||||
itemTemplate // template keyword
|
||||
itemWith // with keyword
|
||||
)
|
||||
|
||||
var key = map[string]itemType{
|
||||
".": itemDot,
|
||||
"define": itemDefine,
|
||||
"else": itemElse,
|
||||
"end": itemEnd,
|
||||
"if": itemIf,
|
||||
"range": itemRange,
|
||||
"nil": itemNil,
|
||||
"template": itemTemplate,
|
||||
"with": itemWith,
|
||||
}
|
||||
|
||||
const eof = -1
|
||||
|
||||
// stateFn represents the state of the scanner as a function that returns the next state.
|
||||
type stateFn func(*lexer) stateFn
|
||||
|
||||
// lexer holds the state of the scanner.
|
||||
type lexer struct {
|
||||
name string // the name of the input; used only for error reports
|
||||
input string // the string being scanned
|
||||
leftDelim string // start of action
|
||||
rightDelim string // end of action
|
||||
state stateFn // the next lexing function to enter
|
||||
pos Pos // current position in the input
|
||||
start Pos // start position of this item
|
||||
width Pos // width of last rune read from input
|
||||
lastPos Pos // position of most recent item returned by nextItem
|
||||
items chan item // channel of scanned items
|
||||
parenDepth int // nesting depth of ( ) exprs
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.width = Pos(w)
|
||||
l.pos += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (l *lexer) backup() {
|
||||
l.pos -= l.width
|
||||
}
|
||||
|
||||
// emit passes an item back to the client.
|
||||
func (l *lexer) emit(t itemType) {
|
||||
l.items <- item{t, l.start, l.input[l.start:l.pos]}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (l *lexer) ignore() {
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's from the valid set.
|
||||
func (l *lexer) accept(valid string) bool {
|
||||
if strings.IndexRune(valid, l.next()) >= 0 {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// acceptRun consumes a run of runes from the valid set.
|
||||
func (l *lexer) acceptRun(valid string) {
|
||||
for strings.IndexRune(valid, l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
|
||||
// lineNumber reports which line we're on, based on the position of
|
||||
// the previous item returned by nextItem. Doing it this way
|
||||
// means we don't have to worry about peek double counting.
|
||||
func (l *lexer) lineNumber() int {
|
||||
return 1 + strings.Count(l.input[:l.lastPos], "\n")
|
||||
}
|
||||
|
||||
// errorf returns an error token and terminates the scan by passing
|
||||
// back a nil pointer that will be the next state, terminating l.nextItem.
|
||||
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
|
||||
l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextItem returns the next item from the input.
|
||||
func (l *lexer) nextItem() item {
|
||||
item := <-l.items
|
||||
l.lastPos = item.pos
|
||||
return item
|
||||
}
|
||||
|
||||
// lex creates a new scanner for the input string.
|
||||
func lex(name, input, left, right string) *lexer {
|
||||
if left == "" {
|
||||
left = leftDelim
|
||||
}
|
||||
if right == "" {
|
||||
right = rightDelim
|
||||
}
|
||||
l := &lexer{
|
||||
name: name,
|
||||
input: input,
|
||||
leftDelim: left,
|
||||
rightDelim: right,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.run()
|
||||
return l
|
||||
}
|
||||
|
||||
// run runs the state machine for the lexer.
|
||||
func (l *lexer) run() {
|
||||
for l.state = lexText; l.state != nil; {
|
||||
l.state = l.state(l)
|
||||
}
|
||||
}
|
||||
|
||||
// state functions
|
||||
|
||||
const (
|
||||
leftDelim = "{{"
|
||||
rightDelim = "}}"
|
||||
leftComment = "/*"
|
||||
rightComment = "*/"
|
||||
)
|
||||
|
||||
// lexText scans until an opening action delimiter, "{{".
|
||||
func lexText(l *lexer) stateFn {
|
||||
for {
|
||||
if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
return lexLeftDelim
|
||||
}
|
||||
if l.next() == eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Correctly reached EOF.
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
l.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexLeftDelim scans the left delimiter, which is known to be present.
|
||||
func lexLeftDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.leftDelim))
|
||||
if strings.HasPrefix(l.input[l.pos:], leftComment) {
|
||||
return lexComment
|
||||
}
|
||||
l.emit(itemLeftDelim)
|
||||
l.parenDepth = 0
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexComment scans a comment. The left comment marker is known to be present.
|
||||
func lexComment(l *lexer) stateFn {
|
||||
l.pos += Pos(len(leftComment))
|
||||
i := strings.Index(l.input[l.pos:], rightComment)
|
||||
if i < 0 {
|
||||
return l.errorf("unclosed comment")
|
||||
}
|
||||
l.pos += Pos(i + len(rightComment))
|
||||
if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
return l.errorf("comment ends before closing delimiter")
|
||||
|
||||
}
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.ignore()
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexRightDelim scans the right delimiter, which is known to be present.
|
||||
func lexRightDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.emit(itemRightDelim)
|
||||
if l.peek() == '\\' {
|
||||
l.pos++
|
||||
l.emit(itemElideNewline)
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexInsideAction scans the elements inside action delimiters.
|
||||
func lexInsideAction(l *lexer) stateFn {
|
||||
// Either number, quoted string, or identifier.
|
||||
// Spaces separate arguments; runs of spaces turn into itemSpace.
|
||||
// Pipe symbols separate and are emitted.
|
||||
if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
if l.parenDepth == 0 {
|
||||
return lexRightDelim
|
||||
}
|
||||
return l.errorf("unclosed left paren")
|
||||
}
|
||||
switch r := l.next(); {
|
||||
case r == eof || isEndOfLine(r):
|
||||
return l.errorf("unclosed action")
|
||||
case isSpace(r):
|
||||
return lexSpace
|
||||
case r == ':':
|
||||
if l.next() != '=' {
|
||||
return l.errorf("expected :=")
|
||||
}
|
||||
l.emit(itemColonEquals)
|
||||
case r == '|':
|
||||
l.emit(itemPipe)
|
||||
case r == '"':
|
||||
return lexQuote
|
||||
case r == '`':
|
||||
return lexRawQuote
|
||||
case r == '$':
|
||||
return lexVariable
|
||||
case r == '\'':
|
||||
return lexChar
|
||||
case r == '.':
|
||||
// special look-ahead for ".field" so we don't break l.backup().
|
||||
if l.pos < Pos(len(l.input)) {
|
||||
r := l.input[l.pos]
|
||||
if r < '0' || '9' < r {
|
||||
return lexField
|
||||
}
|
||||
}
|
||||
fallthrough // '.' can start a number.
|
||||
case r == '+' || r == '-' || ('0' <= r && r <= '9'):
|
||||
l.backup()
|
||||
return lexNumber
|
||||
case isAlphaNumeric(r):
|
||||
l.backup()
|
||||
return lexIdentifier
|
||||
case r == '(':
|
||||
l.emit(itemLeftParen)
|
||||
l.parenDepth++
|
||||
return lexInsideAction
|
||||
case r == ')':
|
||||
l.emit(itemRightParen)
|
||||
l.parenDepth--
|
||||
if l.parenDepth < 0 {
|
||||
return l.errorf("unexpected right paren %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
case r <= unicode.MaxASCII && unicode.IsPrint(r):
|
||||
l.emit(itemChar)
|
||||
return lexInsideAction
|
||||
default:
|
||||
return l.errorf("unrecognized character in action: %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexSpace scans a run of space characters.
|
||||
// One space has already been seen.
|
||||
func lexSpace(l *lexer) stateFn {
|
||||
for isSpace(l.peek()) {
|
||||
l.next()
|
||||
}
|
||||
l.emit(itemSpace)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexIdentifier scans an alphanumeric.
|
||||
func lexIdentifier(l *lexer) stateFn {
Loop:
	for {
		switch r := l.next(); {
		case isAlphaNumeric(r):
			// absorb.
		default:
			l.backup()
			word := l.input[l.start:l.pos]
			if !l.atTerminator() {
				return l.errorf("bad character %#U", r)
			}
			switch {
			case key[word] > itemKeyword:
				l.emit(key[word])
			case word[0] == '.':
				l.emit(itemField)
			case word == "true", word == "false":
				l.emit(itemBool)
			default:
				l.emit(itemIdentifier)
			}
			break Loop
		}
	}
	return lexInsideAction
}

// lexField scans a field: .Alphanumeric.
// The . has been scanned.
func lexField(l *lexer) stateFn {
	return lexFieldOrVariable(l, itemField)
}

// lexVariable scans a Variable: $Alphanumeric.
// The $ has been scanned.
func lexVariable(l *lexer) stateFn {
	if l.atTerminator() { // Nothing interesting follows -> "$".
		l.emit(itemVariable)
		return lexInsideAction
	}
	return lexFieldOrVariable(l, itemVariable)
}

// lexVariable scans a field or variable: [.$]Alphanumeric.
// The . or $ has been scanned.
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
		if typ == itemVariable {
			l.emit(itemVariable)
		} else {
			l.emit(itemDot)
		}
		return lexInsideAction
	}
	var r rune
	for {
		r = l.next()
		if !isAlphaNumeric(r) {
			l.backup()
			break
		}
	}
	if !l.atTerminator() {
		return l.errorf("bad character %#U", r)
	}
	l.emit(typ)
	return lexInsideAction
}

// atTerminator reports whether the input is at valid termination character to
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
// like "$x+2" not being acceptable without a space, in case we decide one
// day to implement arithmetic.
func (l *lexer) atTerminator() bool {
	r := l.peek()
	if isSpace(r) || isEndOfLine(r) {
		return true
	}
	switch r {
	case eof, '.', ',', '|', ':', ')', '(':
		return true
	}
	// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
	// succeed but should fail) but only in extremely rare cases caused by willfully
	// bad choice of delimiter.
	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
		return true
	}
	return false
}

// lexChar scans a character constant. The initial quote is already
// scanned. Syntax checking is done by the parser.
func lexChar(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			if r := l.next(); r != eof && r != '\n' {
				break
			}
			fallthrough
		case eof, '\n':
			return l.errorf("unterminated character constant")
		case '\'':
			break Loop
		}
	}
	l.emit(itemCharConstant)
	return lexInsideAction
}

// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
// and "089" - but when it's wrong the input is invalid and the parser (via
// strconv) will notice.
func lexNumber(l *lexer) stateFn {
	if !l.scanNumber() {
		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
	}
	if sign := l.peek(); sign == '+' || sign == '-' {
		// Complex: 1+2i. No spaces, must end in 'i'.
		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
		}
		l.emit(itemComplex)
	} else {
		l.emit(itemNumber)
	}
	return lexInsideAction
}

func (l *lexer) scanNumber() bool {
	// Optional leading sign.
	l.accept("+-")
	// Is it hex?
	digits := "0123456789"
	if l.accept("0") && l.accept("xX") {
		digits = "0123456789abcdefABCDEF"
	}
	l.acceptRun(digits)
	if l.accept(".") {
		l.acceptRun(digits)
	}
	if l.accept("eE") {
		l.accept("+-")
		l.acceptRun("0123456789")
	}
	// Is it imaginary?
	l.accept("i")
	// Next thing mustn't be alphanumeric.
	if isAlphaNumeric(l.peek()) {
		l.next()
		return false
	}
	return true
}

// lexQuote scans a quoted string.
func lexQuote(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			if r := l.next(); r != eof && r != '\n' {
				break
			}
			fallthrough
		case eof, '\n':
			return l.errorf("unterminated quoted string")
		case '"':
			break Loop
		}
	}
	l.emit(itemString)
	return lexInsideAction
}

// lexRawQuote scans a raw quoted string.
func lexRawQuote(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case eof, '\n':
			return l.errorf("unterminated raw quoted string")
		case '`':
			break Loop
		}
	}
	l.emit(itemRawString)
	return lexInsideAction
}

// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
	return r == ' ' || r == '\t'
}

// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
	return r == '\r' || r == '\n'
}

// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}

834  vendor/github.com/alecthomas/template/parse/node.go  generated  vendored
@@ -1,834 +0,0 @@
[Entire vendored file deleted: the parse-tree node definitions for the template package — the Node interface, NodeType and Pos, and the node types ListNode, TextNode, PipeNode, ActionNode, CommandNode, IdentifierNode, VariableNode, DotNode, NilNode, FieldNode, ChainNode, BoolNode, NumberNode, StringNode, endNode, elseNode, BranchNode, IfNode, RangeNode, WithNode and TemplateNode, each with its constructor, String, tree and Copy methods.]
700  vendor/github.com/alecthomas/template/parse/parse.go  generated  vendored
@@ -1,700 +0,0 @@
[Entire vendored file deleted: the recursive-descent parser for the template package — the Tree type with its token lookahead and error handling (next, backup, backup2, backup3, peek, nextNonSpace, peekNonSpace, errorf, error, expect, expectOneOf, unexpected, recover, startParse, stopParse), the top-level Parse, Copy, add and IsEmptyTree functions, and the grammar productions parse, parseDefinition, itemList, textOrAction, elideNewline, action, pipeline, parseControl, ifControl, rangeControl, withControl, endControl, elseControl, templateControl, command, operand, term, hasFunction, popVars and useVar.]
218
vendor/github.com/alecthomas/template/template.go
generated
vendored
218
vendor/github.com/alecthomas/template/template.go
generated
vendored
@@ -1,218 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// common holds the information shared by related templates.
|
||||
type common struct {
|
||||
tmpl map[string]*Template
|
||||
// We use two maps, one for parsing and one for execution.
|
||||
// This separation makes the API cleaner since it doesn't
|
||||
// expose reflection to the client.
|
||||
parseFuncs FuncMap
|
||||
execFuncs map[string]reflect.Value
|
||||
}
|
||||
|
||||
// Template is the representation of a parsed template. The *parse.Tree
|
||||
// field is exported only for use by html/template and should be treated
|
||||
// as unexported by all other clients.
|
||||
type Template struct {
|
||||
name string
|
||||
*parse.Tree
|
||||
*common
|
||||
leftDelim string
|
||||
rightDelim string
|
||||
}
|
||||
|
||||
// New allocates a new template with the given name.
|
||||
func New(name string) *Template {
|
||||
return &Template{
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the template.
|
||||
func (t *Template) Name() string {
|
||||
return t.name
|
||||
}
|
||||
|
||||
// New allocates a new template associated with the given one and with the same
|
||||
// delimiters. The association, which is transitive, allows one template to
|
||||
// invoke another with a {{template}} action.
|
||||
func (t *Template) New(name string) *Template {
|
||||
t.init()
|
||||
return &Template{
|
||||
name: name,
|
||||
common: t.common,
|
||||
leftDelim: t.leftDelim,
|
||||
rightDelim: t.rightDelim,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Template) init() {
|
||||
if t.common == nil {
|
||||
t.common = new(common)
|
||||
t.tmpl = make(map[string]*Template)
|
||||
t.parseFuncs = make(FuncMap)
|
||||
t.execFuncs = make(map[string]reflect.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the template, including all associated
|
||||
// templates. The actual representation is not copied, but the name space of
|
||||
// associated templates is, so further calls to Parse in the copy will add
|
||||
// templates to the copy but not to the original. Clone can be used to prepare
|
||||
// common templates and use them with variant definitions for other templates
|
||||
// by adding the variants after the clone is made.
|
||||
func (t *Template) Clone() (*Template, error) {
|
||||
nt := t.copy(nil)
|
||||
nt.init()
|
||||
nt.tmpl[t.name] = nt
|
||||
for k, v := range t.tmpl {
|
||||
if k == t.name { // Already installed.
|
||||
continue
|
||||
}
|
||||
// The associated templates share nt's common structure.
|
||||
tmpl := v.copy(nt.common)
|
||||
nt.tmpl[k] = tmpl
|
||||
}
|
||||
for k, v := range t.parseFuncs {
|
||||
nt.parseFuncs[k] = v
|
||||
}
|
||||
for k, v := range t.execFuncs {
|
||||
nt.execFuncs[k] = v
|
||||
}
|
||||
return nt, nil
|
||||
}
|
||||
|
||||
// copy returns a shallow copy of t, with common set to the argument.
|
||||
func (t *Template) copy(c *common) *Template {
|
||||
nt := New(t.name)
|
||||
nt.Tree = t.Tree
|
||||
nt.common = c
|
||||
nt.leftDelim = t.leftDelim
|
||||
nt.rightDelim = t.rightDelim
|
||||
return nt
|
||||
}
|
||||
|
||||
// AddParseTree creates a new template with the name and parse tree
|
||||
// and associates it with t.
|
||||
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
|
||||
if t.common != nil && t.tmpl[name] != nil {
|
||||
return nil, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
nt := t.New(name)
|
||||
nt.Tree = tree
|
||||
t.tmpl[name] = nt
|
||||
return nt, nil
|
||||
}
|
||||
|
||||
// Templates returns a slice of the templates associated with t, including t
|
||||
// itself.
|
||||
func (t *Template) Templates() []*Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
// Return a slice so we don't expose the map.
|
||||
m := make([]*Template, 0, len(t.tmpl))
|
||||
for _, v := range t.tmpl {
|
||||
m = append(m, v)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Delims sets the action delimiters to the specified strings, to be used in
|
||||
// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
|
||||
// definitions will inherit the settings. An empty delimiter stands for the
|
||||
// corresponding default: {{ or }}.
|
||||
// The return value is the template, so calls can be chained.
|
||||
func (t *Template) Delims(left, right string) *Template {
|
||||
t.leftDelim = left
|
||||
t.rightDelim = right
|
||||
return t
|
||||
}
|
||||
|
||||
// Funcs adds the elements of the argument map to the template's function map.
|
||||
// It panics if a value in the map is not a function with appropriate return
|
||||
// type. However, it is legal to overwrite elements of the map. The return
|
||||
// value is the template, so calls can be chained.
|
||||
func (t *Template) Funcs(funcMap FuncMap) *Template {
|
||||
t.init()
|
||||
addValueFuncs(t.execFuncs, funcMap)
|
||||
addFuncs(t.parseFuncs, funcMap)
|
||||
return t
|
||||
}
|
||||
|
||||
// Lookup returns the template with the given name that is associated with t,
|
||||
// or nil if there is no such template.
|
||||
func (t *Template) Lookup(name string) *Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
return t.tmpl[name]
|
||||
}
|
||||
|
||||
// Parse parses a string into a template. Nested template definitions will be
|
||||
// associated with the top-level template t. Parse may be called multiple times
|
||||
// to parse definitions of templates to associate with t. It is an error if a
|
||||
// resulting template is non-empty (contains content other than template
|
||||
// definitions) and would replace a non-empty template with the same name.
|
||||
// (In multiple calls to Parse with the same receiver template, only one call
|
||||
// can contain text other than space, comments, and template definitions.)
|
||||
func (t *Template) Parse(text string) (*Template, error) {
|
||||
t.init()
|
||||
trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add the newly parsed trees, including the one for t, into our common structure.
|
||||
for name, tree := range trees {
|
||||
// If the name we parsed is the name of this template, overwrite this template.
|
||||
// The associate method checks it's not a redefinition.
|
||||
tmpl := t
|
||||
if name != t.name {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
// Even if t == tmpl, we need to install it in the common.tmpl map.
|
||||
if replace, err := t.associate(tmpl, tree); err != nil {
|
||||
return nil, err
|
||||
} else if replace {
|
||||
tmpl.Tree = tree
|
||||
}
|
||||
tmpl.leftDelim = t.leftDelim
|
||||
tmpl.rightDelim = t.rightDelim
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// associate installs the new template into the group of templates associated
|
||||
// with t. It is an error to reuse a name except to overwrite an empty
|
||||
// template. The two are already known to share the common structure.
|
||||
// The boolean return value reports whether to store this tree as t.Tree.
|
||||
func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
|
||||
if new.common != t.common {
|
||||
panic("internal error: associate not common")
|
||||
}
|
||||
name := new.name
|
||||
if old := t.tmpl[name]; old != nil {
|
||||
oldIsEmpty := parse.IsEmptyTree(old.Root)
|
||||
newIsEmpty := parse.IsEmptyTree(tree.Root)
|
||||
if newIsEmpty {
|
||||
// Whether old is empty or not, new is empty; no reason to replace old.
|
||||
return false, nil
|
||||
}
|
||||
if !oldIsEmpty {
|
||||
return false, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
}
|
||||
t.tmpl[name] = new
|
||||
return true, nil
|
||||
}
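
The methods deleted above (Funcs, Delims, Lookup, Parse, AddParseTree) mirror the standard library text/template API. A minimal sketch of how that API is typically chained, written against the stdlib package rather than this exact vendored copy, purely as an illustrative assumption:

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Register a custom function map and custom delimiters, then parse.
	t, err := template.New("greet").
		Delims("[[", "]]").
		Funcs(template.FuncMap{"upper": strings.ToUpper}).
		Parse("Hello, [[upper .Name]]!\n")
	if err != nil {
		panic(err)
	}
	// Lookup returns the named template associated with t (here, t itself).
	if err := t.Lookup("greet").Execute(os.Stdout, struct{ Name string }{"world"}); err != nil {
		panic(err)
	}
}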
19 vendor/github.com/alecthomas/units/COPYING generated vendored
@@ -1,19 +0,0 @@
|
||||
Copyright (C) 2014 Alec Thomas
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
83 vendor/github.com/alecthomas/units/bytes.go generated vendored
@@ -1,83 +0,0 @@
|
||||
package units
|
||||
|
||||
// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
|
||||
// etc.).
|
||||
type Base2Bytes int64
|
||||
|
||||
// Base-2 byte units.
|
||||
const (
|
||||
Kibibyte Base2Bytes = 1024
|
||||
KiB = Kibibyte
|
||||
Mebibyte = Kibibyte * 1024
|
||||
MiB = Mebibyte
|
||||
Gibibyte = Mebibyte * 1024
|
||||
GiB = Gibibyte
|
||||
Tebibyte = Gibibyte * 1024
|
||||
TiB = Tebibyte
|
||||
Pebibyte = Tebibyte * 1024
|
||||
PiB = Pebibyte
|
||||
Exbibyte = Pebibyte * 1024
|
||||
EiB = Exbibyte
|
||||
)
|
||||
|
||||
var (
|
||||
bytesUnitMap = MakeUnitMap("iB", "B", 1024)
|
||||
oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
|
||||
)
|
||||
|
||||
// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
|
||||
// and KiB are both 1024.
|
||||
func ParseBase2Bytes(s string) (Base2Bytes, error) {
|
||||
n, err := ParseUnit(s, bytesUnitMap)
|
||||
if err != nil {
|
||||
n, err = ParseUnit(s, oldBytesUnitMap)
|
||||
}
|
||||
return Base2Bytes(n), err
|
||||
}
|
||||
|
||||
func (b Base2Bytes) String() string {
|
||||
return ToString(int64(b), 1024, "iB", "B")
|
||||
}
|
||||
|
||||
var (
|
||||
metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
|
||||
)
|
||||
|
||||
// MetricBytes are SI byte units (1000 bytes in a kilobyte).
|
||||
type MetricBytes SI
|
||||
|
||||
// SI base-10 byte units.
|
||||
const (
|
||||
Kilobyte MetricBytes = 1000
|
||||
KB = Kilobyte
|
||||
Megabyte = Kilobyte * 1000
|
||||
MB = Megabyte
|
||||
Gigabyte = Megabyte * 1000
|
||||
GB = Gigabyte
|
||||
Terabyte = Gigabyte * 1000
|
||||
TB = Terabyte
|
||||
Petabyte = Terabyte * 1000
|
||||
PB = Petabyte
|
||||
Exabyte = Petabyte * 1000
|
||||
EB = Exabyte
|
||||
)
|
||||
|
||||
// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
|
||||
func ParseMetricBytes(s string) (MetricBytes, error) {
|
||||
n, err := ParseUnit(s, metricBytesUnitMap)
|
||||
return MetricBytes(n), err
|
||||
}
|
||||
|
||||
func (m MetricBytes) String() string {
|
||||
return ToString(int64(m), 1000, "B", "B")
|
||||
}
|
||||
|
||||
// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
|
||||
// respectively. That is, KiB represents 1024 and KB represents 1000.
|
||||
func ParseStrictBytes(s string) (int64, error) {
|
||||
n, err := ParseUnit(s, bytesUnitMap)
|
||||
if err != nil {
|
||||
n, err = ParseUnit(s, metricBytesUnitMap)
|
||||
}
|
||||
return int64(n), err
|
||||
}
13 vendor/github.com/alecthomas/units/doc.go generated vendored
@@ -1,13 +0,0 @@
|
||||
// Package units provides helpful unit multipliers and functions for Go.
|
||||
//
|
||||
// The goal of this package is to have functionality similar to the time [1] package.
|
||||
//
|
||||
//
|
||||
// [1] http://golang.org/pkg/time/
|
||||
//
|
||||
// It allows for code like this:
|
||||
//
|
||||
// n, err := ParseBase2Bytes("1KB")
|
||||
// // n == 1024
|
||||
// n = units.Mebibyte * 512
|
||||
package units
26 vendor/github.com/alecthomas/units/si.go generated vendored
@@ -1,26 +0,0 @@
|
||||
package units
|
||||
|
||||
// SI units.
|
||||
type SI int64
|
||||
|
||||
// SI unit multiples.
|
||||
const (
|
||||
Kilo SI = 1000
|
||||
Mega = Kilo * 1000
|
||||
Giga = Mega * 1000
|
||||
Tera = Giga * 1000
|
||||
Peta = Tera * 1000
|
||||
Exa = Peta * 1000
|
||||
)
|
||||
|
||||
func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
|
||||
return map[string]float64{
|
||||
shortSuffix: 1,
|
||||
"K" + suffix: float64(scale),
|
||||
"M" + suffix: float64(scale * scale),
|
||||
"G" + suffix: float64(scale * scale * scale),
|
||||
"T" + suffix: float64(scale * scale * scale * scale),
|
||||
"P" + suffix: float64(scale * scale * scale * scale * scale),
|
||||
"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
|
||||
}
|
||||
}
138 vendor/github.com/alecthomas/units/util.go generated vendored
@@ -1,138 +0,0 @@
|
||||
package units
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
|
||||
)
|
||||
|
||||
func ToString(n int64, scale int64, suffix, baseSuffix string) string {
|
||||
mn := len(siUnits)
|
||||
out := make([]string, mn)
|
||||
for i, m := range siUnits {
|
||||
if n%scale != 0 || i == 0 && n == 0 {
|
||||
s := suffix
|
||||
if i == 0 {
|
||||
s = baseSuffix
|
||||
}
|
||||
out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
|
||||
}
|
||||
n /= scale
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return strings.Join(out, "")
|
||||
}
|
||||
|
||||
// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
|
||||
var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
|
||||
|
||||
// leadingInt consumes the leading [0-9]* from s.
|
||||
func leadingInt(s string) (x int64, rem string, err error) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
if x >= (1<<63-10)/10 {
|
||||
// overflow
|
||||
return 0, "", errLeadingInt
|
||||
}
|
||||
x = x*10 + int64(c) - '0'
|
||||
}
|
||||
return x, s[i:], nil
|
||||
}
|
||||
|
||||
func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
|
||||
// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
|
||||
orig := s
|
||||
f := float64(0)
|
||||
neg := false
|
||||
|
||||
// Consume [-+]?
|
||||
if s != "" {
|
||||
c := s[0]
|
||||
if c == '-' || c == '+' {
|
||||
neg = c == '-'
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
// Special case: if all that is left is "0", this is zero.
|
||||
if s == "0" {
|
||||
return 0, nil
|
||||
}
|
||||
if s == "" {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
for s != "" {
|
||||
g := float64(0) // this element of the sequence
|
||||
|
||||
var x int64
|
||||
var err error
|
||||
|
||||
// The next character must be [0-9.]
|
||||
if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
// Consume [0-9]*
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
g = float64(x)
|
||||
pre := pl != len(s) // whether we consumed anything before a period
|
||||
|
||||
// Consume (\.[0-9]*)?
|
||||
post := false
|
||||
if s != "" && s[0] == '.' {
|
||||
s = s[1:]
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
scale := 1.0
|
||||
for n := pl - len(s); n > 0; n-- {
|
||||
scale *= 10
|
||||
}
|
||||
g += float64(x) / scale
|
||||
post = pl != len(s)
|
||||
}
|
||||
if !pre && !post {
|
||||
// no digits (e.g. ".s" or "-.s")
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
|
||||
// Consume unit.
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c == '.' || ('0' <= c && c <= '9') {
|
||||
break
|
||||
}
|
||||
}
|
||||
u := s[:i]
|
||||
s = s[i:]
|
||||
unit, ok := unitMap[u]
|
||||
if !ok {
|
||||
return 0, errors.New("units: unknown unit " + u + " in " + orig)
|
||||
}
|
||||
|
||||
f += g * unit
|
||||
}
|
||||
|
||||
if neg {
|
||||
f = -f
|
||||
}
|
||||
if f < float64(-1<<63) || f > float64(1<<63-1) {
|
||||
return 0, errors.New("units: overflow parsing unit")
|
||||
}
|
||||
return int64(f), nil
|
||||
}
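
The deleted units files above expose a small byte-size parsing and formatting API. A minimal usage sketch, restricted to the exported identifiers visible in the hunks above (import path assumed to be the upstream github.com/alecthomas/units):

package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// ParseBase2Bytes accepts both "KiB" and "KB" suffixes as powers of two.
	n, err := units.ParseBase2Bytes("1KB")
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(n)) // 1024

	// ParseStrictBytes treats "KiB" as 1024 but "KB" as 1000.
	m, err := units.ParseStrictBytes("2KiB")
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // 2048

	// Constants combine directly; 512 MiB prints via Base2Bytes.String().
	fmt.Println(units.Mebibyte * 512)
}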
20 vendor/github.com/beorn7/perks/LICENSE generated vendored
@@ -1,20 +0,0 @@
|
||||
Copyright (C) 2013 Blake Mizerany
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
292 vendor/github.com/beorn7/perks/quantile/stream.go generated vendored
@@ -1,292 +0,0 @@
|
||||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
|
||||
// tags have been added for convenience.
|
||||
type Sample struct {
|
||||
Value float64 `json:",string"`
|
||||
Width float64 `json:",string"`
|
||||
Delta float64 `json:",string"`
|
||||
}
|
||||
|
||||
// Samples represents a slice of samples. It implements sort.Interface.
|
||||
type Samples []Sample
|
||||
|
||||
func (a Samples) Len() int { return len(a) }
|
||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
type invariant func(s *stream, r float64) float64
|
||||
|
||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the lower ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewLowBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * r
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the higher ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewHighBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * (s.n - r)
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||
// space and computation time. The targets map maps the desired quantiles to
|
||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targets map[float64]float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for quantile, epsilon := range targets {
|
||||
if quantile*s.n <= r {
|
||||
f = (2 * epsilon * r) / quantile
|
||||
} else {
|
||||
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
*stream
|
||||
b Samples
|
||||
sorted bool
|
||||
}
|
||||
|
||||
func newStream(ƒ invariant) *Stream {
|
||||
x := &stream{ƒ: ƒ}
|
||||
return &Stream{x, make(Samples, 0, 500), true}
|
||||
}
|
||||
|
||||
// Insert inserts v into the stream.
|
||||
func (s *Stream) Insert(v float64) {
|
||||
s.insert(Sample{Value: v, Width: 1})
|
||||
}
|
||||
|
||||
func (s *Stream) insert(sample Sample) {
|
||||
s.b = append(s.b, sample)
|
||||
s.sorted = false
|
||||
if len(s.b) == cap(s.b) {
|
||||
s.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Query returns the computed qth percentiles value. If s was created with
|
||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
||||
// will return an unspecified result.
|
||||
func (s *Stream) Query(q float64) float64 {
|
||||
if !s.flushed() {
|
||||
// Fast path when there hasn't been enough data for a flush;
|
||||
// this also yields better accuracy for small sets of data.
|
||||
l := len(s.b)
|
||||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(math.Ceil(float64(l) * q))
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
s.maybeSort()
|
||||
return s.b[i].Value
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.query(q)
|
||||
}
|
||||
|
||||
// Merge merges samples into the underlying streams samples. This is handy when
|
||||
// merging multiple streams from separate threads, database shards, etc.
|
||||
//
|
||||
// ATTENTION: This method is broken and does not yield correct results. The
|
||||
// underlying algorithm is not capable of merging streams correctly.
|
||||
func (s *Stream) Merge(samples Samples) {
|
||||
sort.Sort(samples)
|
||||
s.stream.merge(samples)
|
||||
}
|
||||
|
||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
||||
func (s *Stream) Reset() {
|
||||
s.stream.reset()
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
// Samples returns stream samples held by s.
|
||||
func (s *Stream) Samples() Samples {
|
||||
if !s.flushed() {
|
||||
return s.b
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.samples()
|
||||
}
|
||||
|
||||
// Count returns the total number of samples observed in the stream
|
||||
// since initialization.
|
||||
func (s *Stream) Count() int {
|
||||
return len(s.b) + s.stream.count()
|
||||
}
|
||||
|
||||
func (s *Stream) flush() {
|
||||
s.maybeSort()
|
||||
s.stream.merge(s.b)
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
func (s *Stream) maybeSort() {
|
||||
if !s.sorted {
|
||||
s.sorted = true
|
||||
sort.Sort(s.b)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) flushed() bool {
|
||||
return len(s.stream.l) > 0
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
n float64
|
||||
l []Sample
|
||||
ƒ invariant
|
||||
}
|
||||
|
||||
func (s *stream) reset() {
|
||||
s.l = s.l[:0]
|
||||
s.n = 0
|
||||
}
|
||||
|
||||
func (s *stream) insert(v float64) {
|
||||
s.merge(Samples{{v, 1, 0}})
|
||||
}
|
||||
|
||||
func (s *stream) merge(samples Samples) {
|
||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||
// whole summaries. The paper doesn't mention merging summaries at
|
||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||
// do merges properly.
|
||||
var r float64
|
||||
i := 0
|
||||
for _, sample := range samples {
|
||||
for ; i < len(s.l); i++ {
|
||||
c := s.l[i]
|
||||
if c.Value > sample.Value {
|
||||
// Insert at position i.
|
||||
s.l = append(s.l, Sample{})
|
||||
copy(s.l[i+1:], s.l[i:])
|
||||
s.l[i] = Sample{
|
||||
sample.Value,
|
||||
sample.Width,
|
||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||
// TODO(beorn7): How to calculate delta correctly?
|
||||
}
|
||||
i++
|
||||
goto inserted
|
||||
}
|
||||
r += c.Width
|
||||
}
|
||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
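
The quantile stream removed above is the estimator used by Prometheus client code for Summary metrics. A minimal sketch of the documented NewTargeted/Insert/Query flow, using only functions shown in the deleted file:

package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with absolute error bounds.
	q := quantile.NewTargeted(map[float64]float64{
		0.5:  0.05,
		0.99: 0.001,
	})
	for i := 0; i < 10000; i++ {
		q.Insert(rand.Float64())
	}
	fmt.Println("p50:", q.Query(0.5))
	fmt.Println("p99:", q.Query(0.99))
	fmt.Println("count:", q.Count())
}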
201 vendor/github.com/dimchansky/utfbom/LICENSE generated vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
192 vendor/github.com/dimchansky/utfbom/utfbom.go generated vendored
@@ -1,192 +0,0 @@
|
||||
// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary.
|
||||
// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader
|
||||
// interface but provides automatic BOM checking and removing as necessary.
|
||||
package utfbom
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Encoding is type alias for detected UTF encoding.
|
||||
type Encoding int
|
||||
|
||||
// Constants to identify detected UTF encodings.
|
||||
const (
|
||||
// Unknown encoding, returned when no BOM was detected
|
||||
Unknown Encoding = iota
|
||||
|
||||
// UTF8, BOM bytes: EF BB BF
|
||||
UTF8
|
||||
|
||||
// UTF-16, big-endian, BOM bytes: FE FF
|
||||
UTF16BigEndian
|
||||
|
||||
// UTF-16, little-endian, BOM bytes: FF FE
|
||||
UTF16LittleEndian
|
||||
|
||||
// UTF-32, big-endian, BOM bytes: 00 00 FE FF
|
||||
UTF32BigEndian
|
||||
|
||||
// UTF-32, little-endian, BOM bytes: FF FE 00 00
|
||||
UTF32LittleEndian
|
||||
)
|
||||
|
||||
// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface.
|
||||
func (e Encoding) String() string {
|
||||
switch e {
|
||||
case UTF8:
|
||||
return "UTF8"
|
||||
case UTF16BigEndian:
|
||||
return "UTF16BigEndian"
|
||||
case UTF16LittleEndian:
|
||||
return "UTF16LittleEndian"
|
||||
case UTF32BigEndian:
|
||||
return "UTF32BigEndian"
|
||||
case UTF32LittleEndian:
|
||||
return "UTF32LittleEndian"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
const maxConsecutiveEmptyReads = 100
|
||||
|
||||
// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary.
|
||||
// It also returns the encoding detected by the BOM.
|
||||
// If the detected encoding is not needed, you can call the SkipOnly function.
|
||||
func Skip(rd io.Reader) (*Reader, Encoding) {
|
||||
// Is it already a Reader?
|
||||
b, ok := rd.(*Reader)
|
||||
if ok {
|
||||
return b, Unknown
|
||||
}
|
||||
|
||||
enc, left, err := detectUtf(rd)
|
||||
return &Reader{
|
||||
rd: rd,
|
||||
buf: left,
|
||||
err: err,
|
||||
}, enc
|
||||
}
|
||||
|
||||
// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary.
|
||||
func SkipOnly(rd io.Reader) *Reader {
|
||||
r, _ := Skip(rd)
|
||||
return r
|
||||
}
|
||||
|
||||
// Reader implements automatic BOM (Unicode Byte Order Mark) checking and
|
||||
// removing as necessary for an io.Reader object.
|
||||
type Reader struct {
|
||||
rd io.Reader // reader provided by the client
|
||||
buf []byte // buffered data
|
||||
err error // last error
|
||||
}
|
||||
|
||||
// Read is an implementation of io.Reader interface.
|
||||
// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary.
|
||||
func (r *Reader) Read(p []byte) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if r.buf == nil {
|
||||
if r.err != nil {
|
||||
return 0, r.readErr()
|
||||
}
|
||||
|
||||
return r.rd.Read(p)
|
||||
}
|
||||
|
||||
// copy as much as we can
|
||||
n = copy(p, r.buf)
|
||||
r.buf = nilIfEmpty(r.buf[n:])
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (r *Reader) readErr() error {
|
||||
err := r.err
|
||||
r.err = nil
|
||||
return err
|
||||
}
|
||||
|
||||
var errNegativeRead = errors.New("utfbom: reader returned negative count from Read")
|
||||
|
||||
func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) {
|
||||
buf, err = readBOM(rd)
|
||||
|
||||
if len(buf) >= 4 {
|
||||
if isUTF32BigEndianBOM4(buf) {
|
||||
return UTF32BigEndian, nilIfEmpty(buf[4:]), err
|
||||
}
|
||||
if isUTF32LittleEndianBOM4(buf) {
|
||||
return UTF32LittleEndian, nilIfEmpty(buf[4:]), err
|
||||
}
|
||||
}
|
||||
|
||||
if len(buf) > 2 && isUTF8BOM3(buf) {
|
||||
return UTF8, nilIfEmpty(buf[3:]), err
|
||||
}
|
||||
|
||||
if (err != nil && err != io.EOF) || (len(buf) < 2) {
|
||||
return Unknown, nilIfEmpty(buf), err
|
||||
}
|
||||
|
||||
if isUTF16BigEndianBOM2(buf) {
|
||||
return UTF16BigEndian, nilIfEmpty(buf[2:]), err
|
||||
}
|
||||
if isUTF16LittleEndianBOM2(buf) {
|
||||
return UTF16LittleEndian, nilIfEmpty(buf[2:]), err
|
||||
}
|
||||
|
||||
return Unknown, nilIfEmpty(buf), err
|
||||
}
|
||||
|
||||
func readBOM(rd io.Reader) (buf []byte, err error) {
|
||||
const maxBOMSize = 4
|
||||
var bom [maxBOMSize]byte // used to read BOM
|
||||
|
||||
// read as many bytes as possible
|
||||
for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] {
|
||||
if n, err = rd.Read(bom[len(buf):]); n < 0 {
|
||||
panic(errNegativeRead)
|
||||
}
|
||||
if n > 0 {
|
||||
nEmpty = 0
|
||||
} else {
|
||||
nEmpty++
|
||||
if nEmpty >= maxConsecutiveEmptyReads {
|
||||
err = io.ErrNoProgress
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func isUTF32BigEndianBOM4(buf []byte) bool {
|
||||
return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF
|
||||
}
|
||||
|
||||
func isUTF32LittleEndianBOM4(buf []byte) bool {
|
||||
return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00
|
||||
}
|
||||
|
||||
func isUTF8BOM3(buf []byte) bool {
|
||||
return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF
|
||||
}
|
||||
|
||||
func isUTF16BigEndianBOM2(buf []byte) bool {
|
||||
return buf[0] == 0xFE && buf[1] == 0xFF
|
||||
}
|
||||
|
||||
func isUTF16LittleEndianBOM2(buf []byte) bool {
|
||||
return buf[0] == 0xFF && buf[1] == 0xFE
|
||||
}
|
||||
|
||||
func nilIfEmpty(buf []byte) (res []byte) {
|
||||
if len(buf) > 0 {
|
||||
res = buf
|
||||
}
|
||||
return
|
||||
}
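
Skip and SkipOnly, deleted above, wrap an io.Reader and strip a leading BOM. A minimal usage sketch based only on the exported API shown in the hunk:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/dimchansky/utfbom"
)

func main() {
	// The reader below starts with a UTF-8 BOM (EF BB BF); Skip removes it
	// and reports the detected encoding.
	src := strings.NewReader("\xEF\xBB\xBFhello")
	sr, enc := utfbom.Skip(src)
	fmt.Println("detected:", enc) // UTF8

	data, err := ioutil.ReadAll(sr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // hello
}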
21 vendor/github.com/go-ole/go-ole/LICENSE generated vendored
@@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright © 2013-2017 Yasuhiro Matsumoto, <mattn.jp@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the “Software”), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
329 vendor/github.com/go-ole/go-ole/com.go generated vendored
@@ -1,329 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package ole
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
"time"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
procCoInitialize, _ = modole32.FindProc("CoInitialize")
|
||||
procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx")
|
||||
procCoUninitialize, _ = modole32.FindProc("CoUninitialize")
|
||||
procCoCreateInstance, _ = modole32.FindProc("CoCreateInstance")
|
||||
procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree")
|
||||
procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID")
|
||||
procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString")
|
||||
procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID")
|
||||
procStringFromIID, _ = modole32.FindProc("StringFromIID")
|
||||
procIIDFromString, _ = modole32.FindProc("IIDFromString")
|
||||
procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID")
|
||||
procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory")
|
||||
procVariantInit, _ = modoleaut32.FindProc("VariantInit")
|
||||
procVariantClear, _ = modoleaut32.FindProc("VariantClear")
|
||||
procVariantTimeToSystemTime, _ = modoleaut32.FindProc("VariantTimeToSystemTime")
|
||||
procSysAllocString, _ = modoleaut32.FindProc("SysAllocString")
|
||||
procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen")
|
||||
procSysFreeString, _ = modoleaut32.FindProc("SysFreeString")
|
||||
procSysStringLen, _ = modoleaut32.FindProc("SysStringLen")
|
||||
procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo")
|
||||
procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch")
|
||||
procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject")
|
||||
|
||||
procGetMessageW, _ = moduser32.FindProc("GetMessageW")
|
||||
procDispatchMessageW, _ = moduser32.FindProc("DispatchMessageW")
|
||||
)
|
||||
|
||||
// coInitialize initializes COM library on current thread.
|
||||
//
|
||||
// MSDN documentation suggests that this function should not be called. Call
|
||||
// CoInitializeEx() instead. The reason has to do with threading and this
|
||||
// function is only for single-threaded apartments.
|
||||
//
|
||||
// That said, most users of the library have gotten away with just this
|
||||
// function. If you are experiencing threading issues, then use
|
||||
// CoInitializeEx().
|
||||
func coInitialize() (err error) {
|
||||
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx
|
||||
// Suggests that no value should be passed to CoInitialize.
|
||||
// Could just be Call() since the parameter is optional. <-- Needs testing to be sure.
|
||||
hr, _, _ := procCoInitialize.Call(uintptr(0))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// coInitializeEx initializes COM library with concurrency model.
|
||||
func coInitializeEx(coinit uint32) (err error) {
|
||||
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx
|
||||
// Suggests that the first parameter is not only optional but should always be NULL.
|
||||
hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CoInitialize initializes COM library on current thread.
|
||||
//
|
||||
// MSDN documentation suggests that this function should not be called. Call
|
||||
// CoInitializeEx() instead. The reason has to do with threading and this
|
||||
// function is only for single-threaded apartments.
|
||||
//
|
||||
// That said, most users of the library have gotten away with just this
|
||||
// function. If you are experiencing threading issues, then use
|
||||
// CoInitializeEx().
|
||||
func CoInitialize(p uintptr) (err error) {
|
||||
// p is ignored and won't be used.
|
||||
// Avoid any variable not used errors.
|
||||
p = uintptr(0)
|
||||
return coInitialize()
|
||||
}
|
||||
|
||||
// CoInitializeEx initializes COM library with concurrency model.
|
||||
func CoInitializeEx(p uintptr, coinit uint32) (err error) {
|
||||
// Avoid any variable not used errors.
|
||||
p = uintptr(0)
|
||||
return coInitializeEx(coinit)
|
||||
}
|
||||
|
||||
// CoUninitialize uninitializes COM Library.
|
||||
func CoUninitialize() {
|
||||
procCoUninitialize.Call()
|
||||
}
|
||||
|
||||
// CoTaskMemFree frees memory pointer.
|
||||
func CoTaskMemFree(memptr uintptr) {
|
||||
procCoTaskMemFree.Call(memptr)
|
||||
}
|
||||
|
||||
// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
|
||||
//
|
||||
// The Programmatic Identifier must be registered, because it will be looked up
|
||||
// in the Windows Registry. The registry entry has the following keys: CLSID,
|
||||
// Insertable, Protocol and Shell
|
||||
// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
|
||||
//
|
||||
// programID identifies the class id with less precision and is not guaranteed
|
||||
// to be unique. These are usually found in the registry under
|
||||
// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
|
||||
// "Program.Component.Version" with version being optional.
|
||||
//
|
||||
// CLSIDFromProgID in Windows API.
|
||||
func CLSIDFromProgID(progId string) (clsid *GUID, err error) {
|
||||
var guid GUID
|
||||
lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
|
||||
hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
clsid = &guid
|
||||
return
|
||||
}
|
||||
|
||||
// CLSIDFromString retrieves Class ID from string representation.
|
||||
//
|
||||
// This is technically the string version of the GUID and will convert the
|
||||
// string to object.
|
||||
//
|
||||
// CLSIDFromString in Windows API.
|
||||
func CLSIDFromString(str string) (clsid *GUID, err error) {
|
||||
var guid GUID
|
||||
lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str)))
|
||||
hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
clsid = &guid
|
||||
return
|
||||
}
|
||||
|
||||
// StringFromCLSID returns GUID formatted string from GUID object.
|
||||
func StringFromCLSID(clsid *GUID) (str string, err error) {
|
||||
var p *uint16
|
||||
hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
str = LpOleStrToString(p)
|
||||
return
|
||||
}
|
||||
|
||||
// IIDFromString returns GUID from program ID.
|
||||
func IIDFromString(progId string) (clsid *GUID, err error) {
|
||||
var guid GUID
|
||||
lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
|
||||
hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
clsid = &guid
|
||||
return
|
||||
}
|
||||
|
||||
// StringFromIID returns GUID formatted string from GUID object.
|
||||
func StringFromIID(iid *GUID) (str string, err error) {
|
||||
var p *uint16
|
||||
hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
str = LpOleStrToString(p)
|
||||
return
|
||||
}
|
||||
|
||||
// CreateInstance of single uninitialized object with GUID.
|
||||
func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
|
||||
if iid == nil {
|
||||
iid = IID_IUnknown
|
||||
}
|
||||
hr, _, _ := procCoCreateInstance.Call(
|
||||
uintptr(unsafe.Pointer(clsid)),
|
||||
0,
|
||||
CLSCTX_SERVER,
|
||||
uintptr(unsafe.Pointer(iid)),
|
||||
uintptr(unsafe.Pointer(&unk)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetActiveObject retrieves pointer to active object.
|
||||
func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
|
||||
if iid == nil {
|
||||
iid = IID_IUnknown
|
||||
}
|
||||
hr, _, _ := procGetActiveObject.Call(
|
||||
uintptr(unsafe.Pointer(clsid)),
|
||||
uintptr(unsafe.Pointer(iid)),
|
||||
uintptr(unsafe.Pointer(&unk)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// VariantInit initializes variant.
|
||||
func VariantInit(v *VARIANT) (err error) {
|
||||
hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// VariantClear clears value in Variant settings to VT_EMPTY.
|
||||
func VariantClear(v *VARIANT) (err error) {
|
||||
hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SysAllocString allocates memory for string and copies string into memory.
|
||||
func SysAllocString(v string) (ss *int16) {
|
||||
pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v))))
|
||||
ss = (*int16)(unsafe.Pointer(pss))
|
||||
return
|
||||
}
|
||||
|
||||
// SysAllocStringLen copies up to length of given string returning pointer.
|
||||
func SysAllocStringLen(v string) (ss *int16) {
|
||||
utf16 := utf16.Encode([]rune(v + "\x00"))
|
||||
ptr := &utf16[0]
|
||||
|
||||
pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1))
|
||||
ss = (*int16)(unsafe.Pointer(pss))
|
||||
return
|
||||
}
|
||||
|
||||
// SysFreeString frees string system memory. This must be called with SysAllocString.
|
||||
func SysFreeString(v *int16) (err error) {
|
||||
hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SysStringLen is the length of the system allocated string.
|
||||
func SysStringLen(v *int16) uint32 {
|
||||
l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v)))
|
||||
return uint32(l)
|
||||
}
|
||||
|
||||
// CreateStdDispatch provides default IDispatch implementation for IUnknown.
|
||||
//
|
||||
// This handles default IDispatch implementation for objects. It has a few
|
||||
// limitations with only supporting one language. It will also only return
|
||||
// default exception codes.
|
||||
func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) {
|
||||
hr, _, _ := procCreateStdDispatch.Call(
|
||||
uintptr(unsafe.Pointer(unk)),
|
||||
v,
|
||||
uintptr(unsafe.Pointer(ptinfo)),
|
||||
uintptr(unsafe.Pointer(&disp)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
|
||||
//
|
||||
// This will not handle the full implementation of the interface.
|
||||
func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) {
|
||||
hr, _, _ := procCreateDispTypeInfo.Call(
|
||||
uintptr(unsafe.Pointer(idata)),
|
||||
uintptr(GetUserDefaultLCID()),
|
||||
uintptr(unsafe.Pointer(&pptinfo)))
|
||||
if hr != 0 {
|
||||
err = NewError(hr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// copyMemory moves location of a block of memory.
|
||||
func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {
|
||||
procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length))
|
||||
}
|
||||
|
||||
// GetUserDefaultLCID retrieves current user default locale.
|
||||
func GetUserDefaultLCID() (lcid uint32) {
|
||||
ret, _, _ := procGetUserDefaultLCID.Call()
|
||||
lcid = uint32(ret)
|
||||
return
|
||||
}
|
||||
|
||||
// GetMessage in message queue from runtime.
|
||||
//
|
||||
// This function appears to block. PeekMessage does not block.
|
||||
func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) {
|
||||
r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax))
|
||||
ret = int32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
// DispatchMessage to window procedure.
|
||||
func DispatchMessage(msg *Msg) (ret int32) {
|
||||
r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg)))
|
||||
ret = int32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
// GetVariantDate converts COM Variant Time value to Go time.Time.
|
||||
func GetVariantDate(value float64) (time.Time, error) {
|
||||
var st syscall.Systemtime
|
||||
r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st)))
|
||||
if r != 0 {
|
||||
return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil
|
||||
}
|
||||
return time.Now(), errors.New("Could not convert to time, passing current time.")
|
||||
}
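
The com.go hunk above removes the COM helper functions (CoInitializeEx, CLSIDFromProgID, CreateInstance) from the vendored go-ole package. A minimal, Windows-only usage sketch; COINIT_MULTITHREADED, IID_IUnknown, Release and the WbemScripting.SWbemLocator ProgID are assumptions taken from elsewhere in go-ole and Windows, not from this hunk:

// +build windows

package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

func main() {
	// Initialise COM for this thread before any other calls, and pair it
	// with CoUninitialize when done.
	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		panic(err)
	}
	defer ole.CoUninitialize()

	// Resolve a ProgID to a CLSID and create an uninitialised instance.
	// The ProgID below is illustrative only.
	clsid, err := ole.CLSIDFromProgID("WbemScripting.SWbemLocator")
	if err != nil {
		panic(err)
	}
	unknown, err := ole.CreateInstance(clsid, ole.IID_IUnknown)
	if err != nil {
		panic(err)
	}
	defer unknown.Release()

	fmt.Println("created COM instance for CLSID", clsid.String())
}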
174 vendor/github.com/go-ole/go-ole/com_func.go generated vendored
@@ -1,174 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
package ole
|
||||
|
||||
import (
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// coInitialize initializes COM library on current thread.
|
||||
//
|
||||
// MSDN documentation suggests that this function should not be called. Call
|
||||
// CoInitializeEx() instead. The reason has to do with threading and this
|
||||
// function is only for single-threaded apartments.
|
||||
//
|
||||
// That said, most users of the library have gotten away with just this
|
||||
// function. If you are experiencing threading issues, then use
|
||||
// CoInitializeEx().
|
||||
func coInitialize() error {
|
||||
return NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// coInitializeEx initializes COM library with concurrency model.
|
||||
func coInitializeEx(coinit uint32) error {
|
||||
return NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// CoInitialize initializes COM library on current thread.
|
||||
//
|
||||
// MSDN documentation suggests that this function should not be called. Call
|
||||
// CoInitializeEx() instead. The reason has to do with threading and this
|
||||
// function is only for single-threaded apartments.
|
||||
//
|
||||
// That said, most users of the library have gotten away with just this
|
||||
// function. If you are experiencing threading issues, then use
|
||||
// CoInitializeEx().
|
||||
func CoInitialize(p uintptr) error {
|
||||
return NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// CoInitializeEx initializes COM library with concurrency model.
|
||||
func CoInitializeEx(p uintptr, coinit uint32) error {
|
||||
return NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// CoUninitialize uninitializes COM Library.
|
||||
func CoUninitialize() {}
|
||||
|
||||
// CoTaskMemFree frees memory pointer.
|
||||
func CoTaskMemFree(memptr uintptr) {}
|
||||
|
||||
// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
|
||||
//
|
||||
// The Programmatic Identifier must be registered, because it will be looked up
|
||||
// in the Windows Registry. The registry entry has the following keys: CLSID,
|
||||
// Insertable, Protocol and Shell
|
||||
// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
|
||||
//
|
||||
// programID identifies the class id with less precision and is not guaranteed
|
||||
// to be unique. These are usually found in the registry under
|
||||
// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
|
||||
// "Program.Component.Version" with version being optional.
|
||||
//
|
||||
// CLSIDFromProgID in Windows API.
|
||||
func CLSIDFromProgID(progId string) (*GUID, error) {
|
||||
return nil, NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// CLSIDFromString retrieves Class ID from string representation.
|
||||
//
|
||||
// This is technically the string version of the GUID and will convert the
|
||||
// string to object.
|
||||
//
|
||||
// CLSIDFromString in Windows API.
|
||||
func CLSIDFromString(str string) (*GUID, error) {
|
||||
return nil, NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// StringFromCLSID returns GUID formatted string from GUID object.
|
||||
func StringFromCLSID(clsid *GUID) (string, error) {
|
||||
return "", NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// IIDFromString returns GUID from program ID.
|
||||
func IIDFromString(progId string) (*GUID, error) {
|
||||
return nil, NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// StringFromIID returns GUID formatted string from GUID object.
|
||||
func StringFromIID(iid *GUID) (string, error) {
|
||||
return "", NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// CreateInstance of single uninitialized object with GUID.
|
||||
func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) {
|
||||
return nil, NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// GetActiveObject retrieves pointer to active object.
|
||||
func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) {
|
||||
return nil, NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// VariantInit initializes variant.
|
||||
func VariantInit(v *VARIANT) error {
|
||||
return NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// VariantClear clears value in Variant settings to VT_EMPTY.
|
||||
func VariantClear(v *VARIANT) error {
|
||||
return NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// SysAllocString allocates memory for string and copies string into memory.
|
||||
func SysAllocString(v string) *int16 {
|
||||
u := int16(0)
|
||||
return &u
|
||||
}
|
||||
|
||||
// SysAllocStringLen copies up to length of given string returning pointer.
|
||||
func SysAllocStringLen(v string) *int16 {
|
||||
u := int16(0)
|
||||
return &u
|
||||
}
|
||||
|
||||
// SysFreeString frees string system memory. This must be called with SysAllocString.
|
||||
func SysFreeString(v *int16) error {
|
||||
return NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// SysStringLen is the length of the system allocated string.
|
||||
func SysStringLen(v *int16) uint32 {
|
||||
return uint32(0)
|
||||
}
|
||||
|
||||
// CreateStdDispatch provides default IDispatch implementation for IUnknown.
|
||||
//
|
||||
// This handles default IDispatch implementation for objects. It haves a few
|
||||
// limitations with only supporting one language. It will also only return
|
||||
// default exception codes.
|
||||
func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) {
|
||||
return nil, NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
|
||||
//
|
||||
// This will not handle the full implementation of the interface.
|
||||
func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) {
|
||||
return nil, NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// copyMemory moves location of a block of memory.
|
||||
func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {}
|
||||
|
||||
// GetUserDefaultLCID retrieves current user default locale.
|
||||
func GetUserDefaultLCID() uint32 {
|
||||
return uint32(0)
|
||||
}
|
||||
|
||||
// GetMessage in message queue from runtime.
|
||||
//
|
||||
// This function appears to block. PeekMessage does not block.
|
||||
func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) {
|
||||
return int32(0), NewError(E_NOTIMPL)
|
||||
}
|
||||
|
||||
// DispatchMessage to window procedure.
|
||||
func DispatchMessage(msg *Msg) int32 {
|
||||
return int32(0)
|
||||
}
|
||||
|
||||
func GetVariantDate(value float64) (time.Time, error) {
|
||||
return time.Now(), NewError(E_NOTIMPL)
|
||||
}
|
||||
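Aside (not part of this diff): the stubs above return E_NOTIMPL off Windows, but the pattern they mirror on Windows is the usual pairing of CoInitializeEx with a deferred CoUninitialize around any COM work. A minimal sketch, assuming only the go-ole API shown in this vendor tree (CoInitializeEx, CoUninitialize and the COINIT_MULTITHREADED constant from constants.go below); the helper name withCOM is made up for illustration.

package example

import (
    "github.com/go-ole/go-ole"
)

// withCOM runs do() with the COM library initialized for the calling thread
// and guarantees the matching CoUninitialize call. Illustrative sketch only.
// Note: depending on the go-ole version, an "already initialized" HRESULT
// (S_FALSE) may also surface here as a non-nil error.
func withCOM(do func() error) error {
    if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
        return err
    }
    defer ole.CoUninitialize()
    return do()
}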
192 vendor/github.com/go-ole/go-ole/connect.go (generated, vendored)
@@ -1,192 +0,0 @@
package ole

// Connection contains IUnknown for fluent interface interaction.
//
// Deprecated. Use oleutil package instead.
type Connection struct {
    Object *IUnknown // Access COM
}

// Initialize COM.
func (*Connection) Initialize() (err error) {
    return coInitialize()
}

// Uninitialize COM.
func (*Connection) Uninitialize() {
    CoUninitialize()
}

// Create IUnknown object based first on ProgId and then from String.
func (c *Connection) Create(progId string) (err error) {
    var clsid *GUID
    clsid, err = CLSIDFromProgID(progId)
    if err != nil {
        clsid, err = CLSIDFromString(progId)
        if err != nil {
            return
        }
    }

    unknown, err := CreateInstance(clsid, IID_IUnknown)
    if err != nil {
        return
    }
    c.Object = unknown

    return
}

// Release IUnknown object.
func (c *Connection) Release() {
    c.Object.Release()
}

// Load COM object from list of programIDs or strings.
func (c *Connection) Load(names ...string) (errors []error) {
    var tempErrors []error = make([]error, len(names))
    var numErrors int = 0
    for _, name := range names {
        err := c.Create(name)
        if err != nil {
            tempErrors = append(tempErrors, err)
            numErrors += 1
            continue
        }
        break
    }

    copy(errors, tempErrors[0:numErrors])
    return
}

// Dispatch returns Dispatch object.
func (c *Connection) Dispatch() (object *Dispatch, err error) {
    dispatch, err := c.Object.QueryInterface(IID_IDispatch)
    if err != nil {
        return
    }
    object = &Dispatch{dispatch}
    return
}

// Dispatch stores IDispatch object.
type Dispatch struct {
    Object *IDispatch // Dispatch object.
}

// Call method on IDispatch with parameters.
func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) {
    id, err := d.GetId(method)
    if err != nil {
        return
    }

    result, err = d.Invoke(id, DISPATCH_METHOD, params)
    return
}

// MustCall method on IDispatch with parameters.
func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) {
    id, err := d.GetId(method)
    if err != nil {
        panic(err)
    }

    result, err = d.Invoke(id, DISPATCH_METHOD, params)
    if err != nil {
        panic(err)
    }

    return
}

// Get property on IDispatch with parameters.
func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) {
    id, err := d.GetId(name)
    if err != nil {
        return
    }
    result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
    return
}

// MustGet property on IDispatch with parameters.
func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) {
    id, err := d.GetId(name)
    if err != nil {
        panic(err)
    }

    result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
    if err != nil {
        panic(err)
    }
    return
}

// Set property on IDispatch with parameters.
func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) {
    id, err := d.GetId(name)
    if err != nil {
        return
    }
    result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
    return
}

// MustSet property on IDispatch with parameters.
func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) {
    id, err := d.GetId(name)
    if err != nil {
        panic(err)
    }

    result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
    if err != nil {
        panic(err)
    }
    return
}

// GetId retrieves ID of name on IDispatch.
func (d *Dispatch) GetId(name string) (id int32, err error) {
    var dispid []int32
    dispid, err = d.Object.GetIDsOfName([]string{name})
    if err != nil {
        return
    }
    id = dispid[0]
    return
}

// GetIds retrieves all IDs of names on IDispatch.
func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) {
    dispid, err = d.Object.GetIDsOfName(names)
    return
}

// Invoke IDispatch on DisplayID of dispatch type with parameters.
//
// There have been problems where if send cascading params..., it would error
// out because the parameters would be empty.
func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) {
    if len(params) < 1 {
        result, err = d.Object.Invoke(id, dispatch)
    } else {
        result, err = d.Object.Invoke(id, dispatch, params...)
    }
    return
}

// Release IDispatch object.
func (d *Dispatch) Release() {
    d.Object.Release()
}

// Connect initializes COM and attempts to load IUnknown based on given names.
func Connect(names ...string) (connection *Connection) {
    connection.Initialize()
    connection.Load(names...)
    return
}
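Aside (not part of this diff): connect.go above is marked deprecated in favour of the oleutil package. For context, a minimal sketch of the oleutil-based equivalent, assuming the go-ole/oleutil API (CreateObject, CallMethod) and using the WMI ProgID "WbemScripting.SWbemLocator" purely as an example; the function name queryWMI is made up for illustration.

package example

import (
    "github.com/go-ole/go-ole"
    "github.com/go-ole/go-ole/oleutil"
)

// queryWMI shows the CreateObject -> QueryInterface -> CallMethod flow that
// replaces the Connection/Dispatch fluent interface. Illustrative sketch only.
func queryWMI() error {
    if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
        return err
    }
    defer ole.CoUninitialize()

    unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
    if err != nil {
        return err
    }
    defer unknown.Release()

    locator, err := unknown.QueryInterface(ole.IID_IDispatch)
    if err != nil {
        return err
    }
    defer locator.Release()

    // ConnectServer with no arguments connects to the local default WMI
    // namespace (typically root\cimv2).
    result, err := oleutil.CallMethod(locator, "ConnectServer")
    if err != nil {
        return err
    }
    service := result.ToIDispatch()
    defer service.Release()
    return nil
}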
153 vendor/github.com/go-ole/go-ole/constants.go (generated, vendored)
@@ -1,153 +0,0 @@
package ole

const (
    CLSCTX_INPROC_SERVER = 1
    CLSCTX_INPROC_HANDLER = 2
    CLSCTX_LOCAL_SERVER = 4
    CLSCTX_INPROC_SERVER16 = 8
    CLSCTX_REMOTE_SERVER = 16
    CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER
    CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER
    CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER
)

const (
    COINIT_APARTMENTTHREADED = 0x2
    COINIT_MULTITHREADED = 0x0
    COINIT_DISABLE_OLE1DDE = 0x4
    COINIT_SPEED_OVER_MEMORY = 0x8
)

const (
    DISPATCH_METHOD = 1
    DISPATCH_PROPERTYGET = 2
    DISPATCH_PROPERTYPUT = 4
    DISPATCH_PROPERTYPUTREF = 8
)

const (
    S_OK = 0x00000000
    E_UNEXPECTED = 0x8000FFFF
    E_NOTIMPL = 0x80004001
    E_OUTOFMEMORY = 0x8007000E
    E_INVALIDARG = 0x80070057
    E_NOINTERFACE = 0x80004002
    E_POINTER = 0x80004003
    E_HANDLE = 0x80070006
    E_ABORT = 0x80004004
    E_FAIL = 0x80004005
    E_ACCESSDENIED = 0x80070005
    E_PENDING = 0x8000000A

    CO_E_CLASSSTRING = 0x800401F3
)

const (
    CC_FASTCALL = iota
    CC_CDECL
    CC_MSCPASCAL
    CC_PASCAL = CC_MSCPASCAL
    CC_MACPASCAL
    CC_STDCALL
    CC_FPFASTCALL
    CC_SYSCALL
    CC_MPWCDECL
    CC_MPWPASCAL
    CC_MAX = CC_MPWPASCAL
)

type VT uint16

const (
    VT_EMPTY VT = 0x0
    VT_NULL VT = 0x1
    VT_I2 VT = 0x2
    VT_I4 VT = 0x3
    VT_R4 VT = 0x4
    VT_R8 VT = 0x5
    VT_CY VT = 0x6
    VT_DATE VT = 0x7
    VT_BSTR VT = 0x8
    VT_DISPATCH VT = 0x9
    VT_ERROR VT = 0xa
    VT_BOOL VT = 0xb
    VT_VARIANT VT = 0xc
    VT_UNKNOWN VT = 0xd
    VT_DECIMAL VT = 0xe
    VT_I1 VT = 0x10
    VT_UI1 VT = 0x11
    VT_UI2 VT = 0x12
    VT_UI4 VT = 0x13
    VT_I8 VT = 0x14
    VT_UI8 VT = 0x15
    VT_INT VT = 0x16
    VT_UINT VT = 0x17
    VT_VOID VT = 0x18
    VT_HRESULT VT = 0x19
    VT_PTR VT = 0x1a
    VT_SAFEARRAY VT = 0x1b
    VT_CARRAY VT = 0x1c
    VT_USERDEFINED VT = 0x1d
    VT_LPSTR VT = 0x1e
    VT_LPWSTR VT = 0x1f
    VT_RECORD VT = 0x24
    VT_INT_PTR VT = 0x25
    VT_UINT_PTR VT = 0x26
    VT_FILETIME VT = 0x40
    VT_BLOB VT = 0x41
    VT_STREAM VT = 0x42
    VT_STORAGE VT = 0x43
    VT_STREAMED_OBJECT VT = 0x44
    VT_STORED_OBJECT VT = 0x45
    VT_BLOB_OBJECT VT = 0x46
    VT_CF VT = 0x47
    VT_CLSID VT = 0x48
    VT_BSTR_BLOB VT = 0xfff
    VT_VECTOR VT = 0x1000
    VT_ARRAY VT = 0x2000
    VT_BYREF VT = 0x4000
    VT_RESERVED VT = 0x8000
    VT_ILLEGAL VT = 0xffff
    VT_ILLEGALMASKED VT = 0xfff
    VT_TYPEMASK VT = 0xfff
)

const (
    DISPID_UNKNOWN = -1
    DISPID_VALUE = 0
    DISPID_PROPERTYPUT = -3
    DISPID_NEWENUM = -4
    DISPID_EVALUATE = -5
    DISPID_CONSTRUCTOR = -6
    DISPID_DESTRUCTOR = -7
    DISPID_COLLECT = -8
)

const (
    TKIND_ENUM = 1
    TKIND_RECORD = 2
    TKIND_MODULE = 3
    TKIND_INTERFACE = 4
    TKIND_DISPATCH = 5
    TKIND_COCLASS = 6
    TKIND_ALIAS = 7
    TKIND_UNION = 8
    TKIND_MAX = 9
)

// Safe Array Feature Flags

const (
    FADF_AUTO = 0x0001
    FADF_STATIC = 0x0002
    FADF_EMBEDDED = 0x0004
    FADF_FIXEDSIZE = 0x0010
    FADF_RECORD = 0x0020
    FADF_HAVEIID = 0x0040
    FADF_HAVEVARTYPE = 0x0080
    FADF_BSTR = 0x0100
    FADF_UNKNOWN = 0x0200
    FADF_DISPATCH = 0x0400
    FADF_VARIANT = 0x0800
    FADF_RESERVED = 0xF008
)
51 vendor/github.com/go-ole/go-ole/error.go (generated, vendored)
@@ -1,51 +0,0 @@
package ole

// OleError stores COM errors.
type OleError struct {
    hr uintptr
    description string
    subError error
}

// NewError creates new error with HResult.
func NewError(hr uintptr) *OleError {
    return &OleError{hr: hr}
}

// NewErrorWithDescription creates new COM error with HResult and description.
func NewErrorWithDescription(hr uintptr, description string) *OleError {
    return &OleError{hr: hr, description: description}
}

// NewErrorWithSubError creates new COM error with parent error.
func NewErrorWithSubError(hr uintptr, description string, err error) *OleError {
    return &OleError{hr: hr, description: description, subError: err}
}

// Code is the HResult.
func (v *OleError) Code() uintptr {
    return uintptr(v.hr)
}

// String description, either manually set or format message with error code.
func (v *OleError) String() string {
    if v.description != "" {
        return errstr(int(v.hr)) + " (" + v.description + ")"
    }
    return errstr(int(v.hr))
}

// Error implements error interface.
func (v *OleError) Error() string {
    return v.String()
}

// Description retrieves error summary, if there is one.
func (v *OleError) Description() string {
    return v.description
}

// SubError returns parent error, if there is one.
func (v *OleError) SubError() error {
    return v.subError
}
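Aside (not part of this diff): because OleError exposes the raw HRESULT via Code(), callers can branch on specific COM failure codes such as E_ACCESSDENIED from constants.go above. A minimal sketch; the helper name isAccessDenied is made up for illustration.

package example

import "github.com/go-ole/go-ole"

// isAccessDenied reports whether err is an *ole.OleError carrying the
// E_ACCESSDENIED HRESULT. Illustrative sketch only.
func isAccessDenied(err error) bool {
    oleErr, ok := err.(*ole.OleError)
    return ok && oleErr.Code() == uintptr(ole.E_ACCESSDENIED)
}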
8 vendor/github.com/go-ole/go-ole/error_func.go (generated, vendored)
@@ -1,8 +0,0 @@
// +build !windows

package ole

// errstr converts error code to string.
func errstr(errno int) string {
    return ""
}
24 vendor/github.com/go-ole/go-ole/error_windows.go (generated, vendored)
@@ -1,24 +0,0 @@
// +build windows

package ole

import (
    "fmt"
    "syscall"
    "unicode/utf16"
)

// errstr converts error code to string.
func errstr(errno int) string {
    // ask windows for the remaining errors
    var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS
    b := make([]uint16, 300)
    n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil)
    if err != nil {
        return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err)
    }
    // trim terminating \r and \n
    for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- {
    }
    return string(utf16.Decode(b[:n]))
}
284 vendor/github.com/go-ole/go-ole/guid.go (generated, vendored)
@@ -1,284 +0,0 @@
package ole

var (
    // IID_NULL is null Interface ID, used when no other Interface ID is known.
    IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}")

    // IID_IUnknown is for IUnknown interfaces.
    IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}")

    // IID_IDispatch is for IDispatch interfaces.
    IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}")

    // IID_IEnumVariant is for IEnumVariant interfaces
    IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}")

    // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces.
    IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}")

    // IID_IConnectionPoint is for IConnectionPoint interfaces.
    IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}")

    // IID_IInspectable is for IInspectable interfaces.
    IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}")

    // IID_IProvideClassInfo is for IProvideClassInfo interfaces.
    IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}")
)

// These are for testing and not part of any library.
var (
    // IID_ICOMTestString is for ICOMTestString interfaces.
    //
    // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED}
    IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}")

    // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces.
    //
    // {BEB06610-EB84-4155-AF58-E2BFF53680B4}
    IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}")

    // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces.
    //
    // {DAA3F9FA-761E-4976-A860-8364CE55F6FC}
    IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}")

    // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces.
    //
    // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}
    IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}")

    // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces.
    //
    // {8D437CBC-B3ED-485C-BC32-C336432A1623}
    IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}")

    // IID_ICOMTestFloat is for ICOMTestFloat interfaces.
    //
    // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C}
    IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}")

    // IID_ICOMTestDouble is for ICOMTestDouble interfaces.
    //
    // {BF908A81-8687-4E93-999F-D86FAB284BA0}
    IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}")

    // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces.
    //
    // {D530E7A6-4EE8-40D1-8931-3D63B8605010}
    IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}")

    // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces.
    //
    // {6485B1EF-D780-4834-A4FE-1EBB51746CA3}
    IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}")

    // IID_ICOMTestTypes is for ICOMTestTypes interfaces.
    //
    // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}
    IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}")

    // CLSID_COMEchoTestObject is for COMEchoTestObject class.
    //
    // {3C24506A-AE9E-4D50-9157-EF317281F1B0}
    CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}")

    // CLSID_COMTestScalarClass is for COMTestScalarClass class.
    //
    // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}
    CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}")
)

const hextable = "0123456789ABCDEF"
const emptyGUID = "{00000000-0000-0000-0000-000000000000}"

// GUID is Windows API specific GUID type.
//
// This exists to match Windows GUID type for direct passing for COM.
// Format is in xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx.
type GUID struct {
    Data1 uint32
    Data2 uint16
    Data3 uint16
    Data4 [8]byte
}

// NewGUID converts the given string into a globally unique identifier that is
// compliant with the Windows API.
//
// The supplied string may be in any of these formats:
//
//  XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
//  XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
//  {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
//
// The conversion of the supplied string is not case-sensitive.
func NewGUID(guid string) *GUID {
    d := []byte(guid)
    var d1, d2, d3, d4a, d4b []byte

    switch len(d) {
    case 38:
        if d[0] != '{' || d[37] != '}' {
            return nil
        }
        d = d[1:37]
        fallthrough
    case 36:
        if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' {
            return nil
        }
        d1 = d[0:8]
        d2 = d[9:13]
        d3 = d[14:18]
        d4a = d[19:23]
        d4b = d[24:36]
    case 32:
        d1 = d[0:8]
        d2 = d[8:12]
        d3 = d[12:16]
        d4a = d[16:20]
        d4b = d[20:32]
    default:
        return nil
    }

    var g GUID
    var ok1, ok2, ok3, ok4 bool
    g.Data1, ok1 = decodeHexUint32(d1)
    g.Data2, ok2 = decodeHexUint16(d2)
    g.Data3, ok3 = decodeHexUint16(d3)
    g.Data4, ok4 = decodeHexByte64(d4a, d4b)
    if ok1 && ok2 && ok3 && ok4 {
        return &g
    }
    return nil
}

func decodeHexUint32(src []byte) (value uint32, ok bool) {
    var b1, b2, b3, b4 byte
    var ok1, ok2, ok3, ok4 bool
    b1, ok1 = decodeHexByte(src[0], src[1])
    b2, ok2 = decodeHexByte(src[2], src[3])
    b3, ok3 = decodeHexByte(src[4], src[5])
    b4, ok4 = decodeHexByte(src[6], src[7])
    value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4)
    ok = ok1 && ok2 && ok3 && ok4
    return
}

func decodeHexUint16(src []byte) (value uint16, ok bool) {
    var b1, b2 byte
    var ok1, ok2 bool
    b1, ok1 = decodeHexByte(src[0], src[1])
    b2, ok2 = decodeHexByte(src[2], src[3])
    value = (uint16(b1) << 8) | uint16(b2)
    ok = ok1 && ok2
    return
}

func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) {
    var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool
    value[0], ok1 = decodeHexByte(s1[0], s1[1])
    value[1], ok2 = decodeHexByte(s1[2], s1[3])
    value[2], ok3 = decodeHexByte(s2[0], s2[1])
    value[3], ok4 = decodeHexByte(s2[2], s2[3])
    value[4], ok5 = decodeHexByte(s2[4], s2[5])
    value[5], ok6 = decodeHexByte(s2[6], s2[7])
    value[6], ok7 = decodeHexByte(s2[8], s2[9])
    value[7], ok8 = decodeHexByte(s2[10], s2[11])
    ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8
    return
}

func decodeHexByte(c1, c2 byte) (value byte, ok bool) {
    var n1, n2 byte
    var ok1, ok2 bool
    n1, ok1 = decodeHexChar(c1)
    n2, ok2 = decodeHexChar(c2)
    value = (n1 << 4) | n2
    ok = ok1 && ok2
    return
}

func decodeHexChar(c byte) (byte, bool) {
    switch {
    case '0' <= c && c <= '9':
        return c - '0', true
    case 'a' <= c && c <= 'f':
        return c - 'a' + 10, true
    case 'A' <= c && c <= 'F':
        return c - 'A' + 10, true
    }

    return 0, false
}

// String converts the GUID to string form. It will adhere to this pattern:
//
//  {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
//
// If the GUID is nil, the string representation of an empty GUID is returned:
//
//  {00000000-0000-0000-0000-000000000000}
func (guid *GUID) String() string {
    if guid == nil {
        return emptyGUID
    }

    var c [38]byte
    c[0] = '{'
    putUint32Hex(c[1:9], guid.Data1)
    c[9] = '-'
    putUint16Hex(c[10:14], guid.Data2)
    c[14] = '-'
    putUint16Hex(c[15:19], guid.Data3)
    c[19] = '-'
    putByteHex(c[20:24], guid.Data4[0:2])
    c[24] = '-'
    putByteHex(c[25:37], guid.Data4[2:8])
    c[37] = '}'
    return string(c[:])
}

func putUint32Hex(b []byte, v uint32) {
    b[0] = hextable[byte(v>>24)>>4]
    b[1] = hextable[byte(v>>24)&0x0f]
    b[2] = hextable[byte(v>>16)>>4]
    b[3] = hextable[byte(v>>16)&0x0f]
    b[4] = hextable[byte(v>>8)>>4]
    b[5] = hextable[byte(v>>8)&0x0f]
    b[6] = hextable[byte(v)>>4]
    b[7] = hextable[byte(v)&0x0f]
}

func putUint16Hex(b []byte, v uint16) {
    b[0] = hextable[byte(v>>8)>>4]
    b[1] = hextable[byte(v>>8)&0x0f]
    b[2] = hextable[byte(v)>>4]
    b[3] = hextable[byte(v)&0x0f]
}

func putByteHex(dst, src []byte) {
    for i := 0; i < len(src); i++ {
        dst[i*2] = hextable[src[i]>>4]
        dst[i*2+1] = hextable[src[i]&0x0f]
    }
}

// IsEqualGUID compares two GUID.
//
// Not constant time comparison.
func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool {
    return guid1.Data1 == guid2.Data1 &&
        guid1.Data2 == guid2.Data2 &&
        guid1.Data3 == guid2.Data3 &&
        guid1.Data4[0] == guid2.Data4[0] &&
        guid1.Data4[1] == guid2.Data4[1] &&
        guid1.Data4[2] == guid2.Data4[2] &&
        guid1.Data4[3] == guid2.Data4[3] &&
        guid1.Data4[4] == guid2.Data4[4] &&
        guid1.Data4[5] == guid2.Data4[5] &&
        guid1.Data4[6] == guid2.Data4[6] &&
        guid1.Data4[7] == guid2.Data4[7]
}
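Aside (not part of this diff): the NewGUID/String/IsEqualGUID trio above is self-contained, so its behaviour can be illustrated directly from the code shown. A minimal sketch:

package main

import (
    "fmt"

    "github.com/go-ole/go-ole"
)

func main() {
    // NewGUID accepts bare, dashed and braced forms; String always renders the braced form.
    a := ole.NewGUID("00000000-0000-0000-C000-000000000046")
    b := ole.NewGUID("{00000000-0000-0000-c000-000000000046}") // parsing is case-insensitive

    fmt.Println(a.String())                       // {00000000-0000-0000-C000-000000000046}
    fmt.Println(ole.IsEqualGUID(a, b))            // true
    fmt.Println(ole.NewGUID("not a guid") == nil) // malformed input yields nil
}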
20 vendor/github.com/go-ole/go-ole/iconnectionpoint.go (generated, vendored)
@@ -1,20 +0,0 @@
package ole

import "unsafe"

type IConnectionPoint struct {
    IUnknown
}

type IConnectionPointVtbl struct {
    IUnknownVtbl
    GetConnectionInterface uintptr
    GetConnectionPointContainer uintptr
    Advise uintptr
    Unadvise uintptr
    EnumConnections uintptr
}

func (v *IConnectionPoint) VTable() *IConnectionPointVtbl {
    return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable))
}
21 vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go (generated, vendored)
@@ -1,21 +0,0 @@
// +build !windows

package ole

import "unsafe"

func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 {
    return int32(0)
}

func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) {
    return uint32(0), NewError(E_NOTIMPL)
}

func (v *IConnectionPoint) Unadvise(cookie uint32) error {
    return NewError(E_NOTIMPL)
}

func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) {
    return NewError(E_NOTIMPL)
}
43 vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go (generated, vendored)
@@ -1,43 +0,0 @@
// +build windows

package ole

import (
    "syscall"
    "unsafe"
)

func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 {
    // XXX: This doesn't look like it does what it's supposed to
    return release((*IUnknown)(unsafe.Pointer(v)))
}

func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) {
    hr, _, _ := syscall.Syscall(
        v.VTable().Advise,
        3,
        uintptr(unsafe.Pointer(v)),
        uintptr(unsafe.Pointer(unknown)),
        uintptr(unsafe.Pointer(&cookie)))
    if hr != 0 {
        err = NewError(hr)
    }
    return
}

func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) {
    hr, _, _ := syscall.Syscall(
        v.VTable().Unadvise,
        2,
        uintptr(unsafe.Pointer(v)),
        uintptr(cookie),
        0)
    if hr != 0 {
        err = NewError(hr)
    }
    return
}

func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error {
    return NewError(E_NOTIMPL)
}
17 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go (generated, vendored)
@@ -1,17 +0,0 @@
package ole

import "unsafe"

type IConnectionPointContainer struct {
    IUnknown
}

type IConnectionPointContainerVtbl struct {
    IUnknownVtbl
    EnumConnectionPoints uintptr
    FindConnectionPoint uintptr
}

func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl {
    return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable))
}
11 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go (generated, vendored)
@@ -1,11 +0,0 @@
// +build !windows

package ole

func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error {
    return NewError(E_NOTIMPL)
}

func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error {
    return NewError(E_NOTIMPL)
}
25 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go (generated, vendored)
@@ -1,25 +0,0 @@
// +build windows

package ole

import (
    "syscall"
    "unsafe"
)

func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error {
    return NewError(E_NOTIMPL)
}

func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) {
    hr, _, _ := syscall.Syscall(
        v.VTable().FindConnectionPoint,
        3,
        uintptr(unsafe.Pointer(v)),
        uintptr(unsafe.Pointer(iid)),
        uintptr(unsafe.Pointer(point)))
    if hr != 0 {
        err = NewError(hr)
    }
    return
}
Some files were not shown because too many files have changed in this diff.