Compare commits

...

48 Commits

Author SHA1 Message Date
Yang Zhao (he/him)
b26ae86992 docs: Update textfile collector defaults inclusion (#1722) 2024-11-08 21:22:46 +00:00
dependabot[bot]
004f8da0ef chore(deps): bump github.com/Microsoft/hcsshim from 0.12.8 to 0.12.9 (#1716)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-04 12:34:07 +01:00
dependabot[bot]
65f41b3582 chore(deps): bump github.com/prometheus/exporter-toolkit from 0.13.0 to 0.13.1 (#1717)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-04 12:33:58 +01:00
Jan-Otto Kröpke
be67d853aa cpu: implement int32 counter for PDH calls as well. (#1715) 2024-11-03 19:24:36 +01:00
Jan-Otto Kröpke
bf233ad3e3 mi: replace all WMI calls with MI calls (#1714) 2024-11-03 17:23:26 +01:00
Jan-Otto Kröpke
45d3eabab9 mi: Revert "replace all WMI calls with MI calls" (#1713) 2024-11-03 11:20:46 +01:00
Jan-Otto Kröpke
c4f5d58a3e mi: replace all WMI calls with MI calls (#1700) 2024-11-03 01:03:34 +01:00
dependabot[bot]
582d8dd29c chore(deps): bump github.com/Microsoft/hcsshim from 0.12.7 to 0.12.8 (#1707)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-28 13:53:32 +01:00
dependabot[bot]
abc5388cab chore(deps): bump github.com/prometheus/common from 0.60.0 to 0.60.1 (#1708)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-28 12:40:17 +01:00
dependabot[bot]
fd9eb6d877 chore(deps): bump github.com/prometheus/client_golang from 1.20.4 to 1.20.5 (#1701)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan-Otto Kröpke <github@jkroepke.de>
2024-10-24 18:57:53 +02:00
poly
bad8ba225a docs: Add log.file argument to README (#1702) 2024-10-23 16:26:09 +02:00
Jan-Otto Kröpke
ce2df385a6 tcp: fix panic on collector (#1699) 2024-10-17 02:29:53 +02:00
Jan-Otto Kröpke
92b7e445e1 fix: fail, if unknown collector is defined in enabled list (#1693) 2024-10-17 01:17:57 +02:00
Jan-Otto Kröpke
332b0a8a1c update: rename updates collector to update (#1692) 2024-10-15 13:55:16 +02:00
Niko Ehrenfeuchter
73755b8bfe docs: add actual Windows Update service name (#1690) 2024-10-15 13:55:07 +02:00
dependabot[bot]
25d90212ce chore(deps): bump github.com/bmatcuk/doublestar/v4 from 4.6.1 to 4.7.1 (#1689)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-14 13:38:02 +02:00
Jan-Otto Kröpke
d1517d8398 logon: BREAKING: replace wmi query by Win32 API calls and expose detailed logon sessions. (click PR for more information) (#1687) 2024-10-13 10:19:41 +02:00
Jan-Otto Kröpke
7500ad6a83 *: Remove teradici_pcoip and vmware_blast collector (#1686) 2024-10-12 21:27:26 +02:00
Jan-Otto Kröpke
a0159b333e exchange: Use new collector interface (#1685) 2024-10-12 18:09:05 +02:00
Jan-Otto Kröpke
22fdb33b4c chore: optimize registry collector (#1683) 2024-10-11 00:18:36 +02:00
Jan-Otto Kröpke
f46f9082f9 memory: Implement perfdata collector (#1676) 2024-10-10 21:48:21 +02:00
Jan-Otto Kröpke
028f7aa823 *: don't exclude resources, if empty exclude is empty. (#1680) 2024-10-10 21:44:04 +02:00
Jan-Otto Kröpke
545bf77326 Update pr-check.yaml
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
2024-10-10 21:06:37 +02:00
Niko Ehrenfeuchter
90ac0b269e chore(docs): fix link to update collector (#1682) 2024-10-10 17:35:36 +02:00
dependabot[bot]
72df5154fc chore(deps): bump github.com/prometheus/common from 0.59.2-0.20240918152650-14bac55a992f to 0.60.0 (#1674)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-07 14:51:50 +02:00
dependabot[bot]
4ee03c4528 chore(deps): bump golang.org/x/sys from 0.25.0 to 0.26.0 (#1675)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-07 14:51:29 +02:00
Jan-Otto Kröpke
2ef1a5fdf1 logical_disk: Implement Perfdata collector (#1673) 2024-10-07 00:15:54 +02:00
Jan-Otto Kröpke
efb20b1e31 dns: Implement Perfdata collector (#1672) 2024-10-06 00:22:58 +02:00
Jan-Otto Kröpke
ab33f3c49c dhcp: Use new collector interface (#1671) 2024-10-05 21:55:26 +02:00
Jan-Otto Kröpke
5952c51a39 *: Implement collector interface for registry perfdata (#1670) 2024-10-05 21:33:40 +02:00
Jan-Otto Kröpke
2a9a11bd01 process: fix fallback to V1 collector (#1667) 2024-10-03 23:44:36 +02:00
Jan-Otto Kröpke
79baf9921d process: Implement PDH collector and add support for Process V2 (#1666) 2024-10-03 21:24:17 +02:00
Jan-Otto Kröpke
7e9976efd3 chore: cleanup move private packages to internal (#1665) 2024-10-03 20:34:45 +02:00
Jan-Otto Kröpke
5d95610c84 chore: Move private packages to internal (#1664) 2024-10-03 20:23:56 +02:00
Jan-Otto Kröpke
bcfe6df24d dfsr: Implement PDH collector (#1663) 2024-10-03 19:23:20 +02:00
Jan-Otto Kröpke
70156cd106 cache: Implement PDH collector (#1662) 2024-10-03 14:31:44 +02:00
Jan-Otto Kröpke
e6ef2de40c ad: Implement PDH collector (#1660) 2024-10-03 11:59:10 +02:00
Jan-Otto Kröpke
0a78909cf6 *: replace --collectors.[name].* with --collector.[name].* flags (click PR number for more information) (#1659) 2024-10-02 13:24:58 +02:00
Jan-Otto Kröpke
2155d34779 net: expose network interfaces address (#1635) 2024-10-01 23:44:53 +02:00
astigmata
1caef5cc12 tcp: use GetExtendedTcpTable to display states Closed, Listening, SynSent, SynRcvd, CloseWait, TimeWait ... (#1638)
Co-authored-by: Jan-Otto Kröpke <github@jkroepke.de>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan-Otto Kröpke <joe@cloudeteer.de>
2024-10-01 23:23:35 +02:00
Jan-Otto Kröpke
14910efd4f updates: add windows update collector (#1652)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2024-10-01 23:23:23 +02:00
Jan-Otto Kröpke
48e0e11063 cpu: add workaround for counter resets related to % Processor Utility metric (#1637) 2024-10-01 10:53:13 +02:00
Jan-Otto Kröpke
996080c1e9 adfs: Implement PDH collector (#1656) 2024-09-29 13:25:04 +02:00
Jan-Otto Kröpke
e6aaf91df1 initiate: close event log handle (#1654) 2024-09-28 21:23:05 +02:00
Jan-Otto Kröpke
b67b930ffc initiate: fix Cannot create another system semaphore error (#1653) 2024-09-28 19:53:12 +02:00
Jan-Otto Kröpke
a1defadf1e collector: add stack trace, if collector panics (#1650) 2024-09-28 15:57:56 +02:00
Jan-Otto Kröpke
01e809315c scheduled_task: fix memory leaks (#1649) 2024-09-28 15:15:15 +02:00
Jan-Otto Kröpke
798bf32dec adcs: Implement PDH collector (#1648) 2024-09-28 13:23:08 +02:00
250 changed files with 11522 additions and 6583 deletions

View File

@@ -37,7 +37,7 @@ jobs:
- name: check - name: check
run: | run: |
PR_TITLE_PREFIX=$(echo "$PR_TITLE" | cut -d':' -f1) PR_TITLE_PREFIX=$(echo "$PR_TITLE" | cut -d':' -f1)
if [[ -d "pkg/collector/$PR_TITLE_PREFIX" ]] ||[[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then
exit 0 exit 0
fi fi

View File

@@ -44,7 +44,7 @@ jobs:
run: | run: |
curl.exe -L https://github.com/moby/buildkit/releases/download/v${{ env.VERSION_BUILDKIT }}/buildkit-v${{ env.VERSION_BUILDKIT }}.windows-amd64.tar.gz -o buildkit.tar.gz curl.exe -L https://github.com/moby/buildkit/releases/download/v${{ env.VERSION_BUILDKIT }}/buildkit-v${{ env.VERSION_BUILDKIT }}.windows-amd64.tar.gz -o buildkit.tar.gz
tar.exe xvf buildkit.tar.gz tar.exe xvf buildkit.tar.gz
.\bin\buildkitd.exe --register-service .\bin\buildkitd.exe --register-service
Start-Service buildkitd Start-Service buildkitd
- name: Setup Docker Buildx - name: Setup Docker Buildx
@@ -75,32 +75,32 @@ jobs:
- name: Build - name: Build
run: | run: |
$ErrorActionPreference = "Stop" $ErrorActionPreference = "Stop"
$Version = git describe --tags --always $Version = git describe --tags --always
$Version = $Version -replace 'v', '' $Version = $Version -replace 'v', ''
# '+' symbols are invalid characters in image tags # '+' symbols are invalid characters in image tags
$Version = $Version -replace '\+', '_' $Version = $Version -replace '\+', '_'
$Version | Set-Content VERSION -PassThru $Version | Set-Content VERSION -PassThru
make build-all make build-all
# GH requires all files to have different names, so add version/arch to differentiate # GH requires all files to have different names, so add version/arch to differentiate
foreach($Arch in "amd64", "arm64") { foreach($Arch in "amd64", "arm64") {
Move-Item output\$Arch\windows_exporter.exe output\windows_exporter-$Version-$Arch.exe Move-Item output\$Arch\windows_exporter.exe output\windows_exporter-$Version-$Arch.exe
} }
Get-ChildItem -Path output Get-ChildItem -Path output
- name: Build Release Artifacts - name: Build Release Artifacts
run: | run: |
$ErrorActionPreference = "Stop" $ErrorActionPreference = "Stop"
$Version = Get-Content VERSION $Version = Get-Content VERSION
foreach($Arch in "amd64", "arm64") { foreach($Arch in "amd64", "arm64") {
Write-Host "Building windows_exporter $Version msi for $Arch" Write-Host "Building windows_exporter $Version msi for $Arch"
.\installer\build.ps1 -PathToExecutable .\output\windows_exporter-$Version-$Arch.exe -Version $Version -Arch "$Arch" .\installer\build.ps1 -PathToExecutable .\output\windows_exporter-$Version-$Arch.exe -Version $Version -Arch "$Arch"
} }
Move-Item installer\*.msi output\ Move-Item installer\*.msi output\
Get-ChildItem -Path output\ Get-ChildItem -Path output\
@@ -119,7 +119,7 @@ jobs:
env: env:
VERSION: >- VERSION: >-
${{ ${{
startsWith(github.ref, 'refs/tags/') && 'latest' || startsWith(github.ref, 'refs/tags/') && 'latest' ||
( (
github.event_name == 'pull_request' && format('pr-{0}', github.event.number) || github.ref_name github.event_name == 'pull_request' && format('pr-{0}', github.event.number) || github.ref_name
) )

View File

@@ -15,7 +15,9 @@ linters:
- gocognit - gocognit
- goconst - goconst
- gocyclo - gocyclo
- godot
- gomnd - gomnd
- paralleltest
- lll - lll
- maintidx - maintidx
- mnd - mnd
@@ -29,11 +31,12 @@ run:
linters-settings: linters-settings:
gosec: gosec:
excludes: excludes:
- G101 # Potential hardcoded credentials
- G115 # integer overflow conversion - G115 # integer overflow conversion
gci: gci:
sections: sections:
- prefix(github.com/prometheus-community/windows_exporter/pkg/initiate) - prefix(github.com/prometheus-community/windows_exporter/internal/initiate)
- standard # Standard section: captures all standard packages. - standard # Standard section: captures all standard packages.
- default # Default section: contains all imports that could not be matched to another section type. - default # Default section: contains all imports that could not be matched to another section type.
custom-order: true custom-order: true
@@ -48,7 +51,7 @@ linters-settings:
forbidigo: forbidigo:
forbid: forbid:
- "^(fmt\\.Print(|f|ln)|print|println)$" - "^(fmt\\.Print(|f|ln)|print|println)$"
- p: "^syscall\\..*$" - p: "^syscall\\.(.{1,7}|.{7}[^N]|.{9,})$"
msg: use golang.org/x/sys/windows instead of syscall msg: use golang.org/x/sys/windows instead of syscall
- p: "^windows\\.NewLazyDLL$" - p: "^windows\\.NewLazyDLL$"
msg: use NewLazySystemDLL instead NewLazyDLL msg: use NewLazySystemDLL instead NewLazyDLL
@@ -82,7 +85,7 @@ issues:
- text: "don't use ALL_CAPS in Go names; use CamelCase" - text: "don't use ALL_CAPS in Go names; use CamelCase"
linters: linters:
- revive - revive
- path: pkg/perflib/ - path: internal/perfdata/v1/
linters: linters:
- godox - godox
- stylecheck - stylecheck

View File

@@ -2,6 +2,7 @@ go:
# Whenever the Go version is updated here, # Whenever the Go version is updated here,
# .github/workflows should also be updated. # .github/workflows should also be updated.
version: 1.23 version: 1.23
cgo: false
repository: repository:
path: github.com/prometheus-community/windows_exporter path: github.com/prometheus-community/windows_exporter
build: build:
@@ -21,4 +22,5 @@ tarball:
- LICENSE - LICENSE
crossbuild: crossbuild:
platforms: platforms:
- windows - windows/amd64
- windows/arm64

View File

@@ -29,7 +29,7 @@ test:
go test -v ./... go test -v ./...
bench: bench:
go test -v -bench='benchmarkcollector' ./pkg/collector/{cpu,logical_disk,physical_disk,logon,memory,net,printer,process,service,system,tcp,time} go test -v -bench='benchmarkcollector' ./internal/collectors/{cpu,logical_disk,physical_disk,logon,memory,net,printer,process,service,system,tcp,time}
lint: lint:
golangci-lint -c .golangci.yaml run golangci-lint -c .golangci.yaml run

View File

@@ -35,6 +35,7 @@ Name | Description | Enabled by default
[netframework](docs/collector.netframework.md) | .NET Framework metrics | [netframework](docs/collector.netframework.md) | .NET Framework metrics |
[net](docs/collector.net.md) | Network interface I/O | &#10003; [net](docs/collector.net.md) | Network interface I/O | &#10003;
[os](docs/collector.os.md) | OS metrics (memory, processes, users) | &#10003; [os](docs/collector.os.md) | OS metrics (memory, processes, users) | &#10003;
[perfdata](docs/collector.perfdata.md) | Custom perfdata metrics |
[physical_disk](docs/collector.physical_disk.md) | physical disk metrics | &#10003; [physical_disk](docs/collector.physical_disk.md) | physical disk metrics | &#10003;
[printer](docs/collector.printer.md) | Printer metrics | [printer](docs/collector.printer.md) | Printer metrics |
[process](docs/collector.process.md) | Per-process metrics | [process](docs/collector.process.md) | Per-process metrics |
@@ -46,12 +47,11 @@ Name | Description | Enabled by default
[smtp](docs/collector.smtp.md) | IIS SMTP Server | [smtp](docs/collector.smtp.md) | IIS SMTP Server |
[system](docs/collector.system.md) | System calls | &#10003; [system](docs/collector.system.md) | System calls | &#10003;
[tcp](docs/collector.tcp.md) | TCP connections | [tcp](docs/collector.tcp.md) | TCP connections |
[teradici_pcoip](docs/collector.teradici_pcoip.md) | [Teradici PCoIP](https://www.teradici.com/web-help/pcoip_wmi_specs/) session metrics |
[time](docs/collector.time.md) | Windows Time Service |
[thermalzone](docs/collector.thermalzone.md) | Thermal information
[terminal_services](docs/collector.terminal_services.md) | Terminal services (RDS) [terminal_services](docs/collector.terminal_services.md) | Terminal services (RDS)
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | [textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file |
[vmware_blast](docs/collector.vmware_blast.md) | VMware Blast session metrics | [thermalzone](docs/collector.thermalzone.md) | Thermal information |
[time](docs/collector.time.md) | Windows Time Service |
[update](docs/collector.update.md) | Windows Update Service |
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent | [vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |
See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples. See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples.
@@ -75,17 +75,18 @@ This can be useful for having different Prometheus servers collect specific metr
windows_exporter accepts flags to configure certain behaviours. The ones configuring the global behaviour of the exporter are listed below, while collector-specific ones are documented in the respective collector documentation above. windows_exporter accepts flags to configure certain behaviours. The ones configuring the global behaviour of the exporter are listed below, while collector-specific ones are documented in the respective collector documentation above.
| Flag | Description | Default value | | Flag | Description | Default value |
|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|---------------| |--------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
| `--web.listen-address` | host:port for exporter. | `:9182` | | `--web.listen-address` | host:port for exporter. | `:9182` |
| `--telemetry.path` | URL path for surfacing collected metrics. | `/metrics` | | `--telemetry.path` | URL path for surfacing collected metrics. | `/metrics` |
| `--telemetry.max-requests` | Maximum number of concurrent requests. 0 to disable. | `5` | | `--telemetry.max-requests` | Maximum number of concurrent requests. 0 to disable. | `5` |
| `--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default." | `[defaults]` | | `--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default." | `[defaults]` |
| `--collectors.print` | If true, print available collectors and exit. | | | `--collectors.print` | If true, print available collectors and exit. | |
| `--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5` | | `--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5` |
| `--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None | | `--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None |
| `--config.file` | [Using a config file](#using-a-configuration-file) from path or URL | None | | `--config.file` | [Using a config file](#using-a-configuration-file) from path or URL | None |
| `--config.file.insecure-skip-verify` | Skip TLS when loading config file from URL | false | | `--config.file.insecure-skip-verify` | Skip TLS when loading config file from URL | false |
| `--log.file` | Output file of log messages. One of [stdout, stderr, eventlog, \<path to log file>]<br>**NOTE:** The MSI installer will add a default argument to the installed service setting this to eventlog | stderr |
## Installation ## Installation

View File

@@ -1,3 +1,5 @@
# example configuration file for windows_exporter
collectors: collectors:
enabled: cpu,cpu_info,exchange,iis,logical_disk,logon,memory,net,os,process,remote_fx,service,system,tcp,time,terminal_services,textfile enabled: cpu,cpu_info,exchange,iis,logical_disk,logon,memory,net,os,process,remote_fx,service,system,tcp,time,terminal_services,textfile
collector: collector:

View File

@@ -1,140 +0,0 @@
{{ template "head" . }}
{{ template "prom_content_head" . }}
<h1>Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
<h3>CPU Usage</h3>
<div id="cpuGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#cpuGraph"),
expr: "sum by (mode)(irate(windows_cpu_time_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
renderer: 'area',
max: {{ with printf "count(count by (cpu)(windows_cpu_time_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: 'Cores'
})
</script>
<h3>Network Utilization</h3>
<div id="networkioGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#networkioGraph"),
expr: [
"irate(windows_net_bytes_sent_total{job='node',instance='{{ .Params.instance }}',nic!~'^isatap_ec2_internal'}[5m])",
"irate(windows_net_bytes_received_total{job='node',instance='{{ .Params.instance }}',nic!~'^isatap_ec2_internal'}[5m])",
],
min: 0,
name: [ 'sent', 'received' ],
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "B",
yTitle: 'Network IO'
})
</script>
<h3>Disk I/O Utilization</h3>
<div id="diskioGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#diskioGraph"),
expr: [
"100 - irate(windows_logical_disk_idle_seconds_total{job='node',instance='{{ .Params.instance }}',volume!~'^HarddiskVolume.*$'}[5m]) * 100",
],
min: 0,
name: '[[ volume ]]',
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "%",
yTitle: 'Disk I/O Utilization'
})
</script>
<h3>Memory</h3>
<div id="memoryGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#memoryGraph"),
renderer: 'area',
expr: [
"windows_cs_physical_memory_bytes{job='node',instance='{{ .Params.instance }}'}",
"windows_os_physical_memory_free_bytes{job='node',instance='{{ .Params.instance }}'}",
"windows_cs_physical_memory__bytes{job='node',instance='{{ .Params.instance }}'} - windows_os_physical_memory_free_bytes{job='node',instance='{{.Params.instance}}'}",
"windows_os_virtual_memory_bytes{job='node',instance='{{ .Params.instance }}'}",
],
name: ["Physical", "Free", "Used", "Virtual"],
min: 0,
yUnits: "B",
yAxisFormatter: PromConsole.NumberFormatter.humanize1024,
yHoverFormatter: PromConsole.NumberFormatter.humanize1024,
yTitle: 'Memory'
})
</script>
{{ template "prom_right_table_head" }}
<tr><th colspan="2">Overview</th></tr>
<tr>
<td>User CPU</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(windows_cpu_time_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(windows_cpu_time_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
</tr>
<tr>
<td>Privileged CPU</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(windows_cpu_time_total{job='node',instance='%s',mode='privileged'}[5m])) * 100 / count(count by (cpu)(windows_cpu_time_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
</tr>
<tr>
<td>Memory Total</td>
<td>{{ template "prom_query_drilldown" (args (printf "windows_cs_physical_memory_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
<td>Memory Free</td>
<td>{{ template "prom_query_drilldown" (args (printf "windows_os_physical_memory_free_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
<th colspan="2">Network</th>
</tr>
{{ range printf "windows_net_bytes_received_total{job='node',instance='%s',nic!='isatap_ec2_internal'}" .Params.instance | query | sortByLabel "nic" }}
<tr>
<td>{{ .Labels.nic }} Received</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(windows_net_bytes_received_total{job='node',instance='%s',nic='%s'}[5m])" .Labels.instance .Labels.nic) "B/s" "humanize") }}</td>
</tr>
<tr>
<td>{{ .Labels.nic }} Transmitted</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(windows_net_bytes_sent_total{job='node',instance='%s',nic='%s'}[5m])" .Labels.instance .Labels.nic) "B/s" "humanize") }}</td>
</tr>
{{ end }}
</tr>
<tr>
<th colspan="2">Disks</th>
</tr>
{{ range printf "windows_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
<tr>
<td>{{ .Labels.volume }} Utilization</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 - irate(windows_logical_disk_idle_seconds_total{job='node',instance='%s',volume='%s'}[5m]) * 100" .Labels.instance .Labels.volume) "%" "printf.1f") }}</td>
</tr>
{{ end }}
{{ range printf "windows_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
<tr>
<td>{{ .Labels.volume }} Throughput</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(windows_logical_disk_read_bytes_total{job='node',instance='%s',volume='%s'}[5m]) + irate(windows_logical_disk_write_bytes_total{job='node',instance='%s',volume='%s'}[5m])" .Labels.instance .Labels.volume .Labels.instance .Labels.volume) "B/s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
<th colspan="2">Filesystem Fullness</th>
</tr>
{{ define "roughlyNearZero" }}
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
{{ end }}
{{ range printf "windows_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
<tr>
<td>{{ .Labels.volume }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 - windows_logical_disk_free_bytes{job='node',instance='%s',volume='%s'} / windows_logical_disk_size_bytes{job='node'} * 100" .Labels.instance .Labels.volume) "%" "roughlyNearZero") }}</td>
</tr>
{{ end }}
</tr>
{{ template "prom_right_table_tail" }}
{{ template "prom_content_tail" . }}
{{ template "tail" }}

View File

@@ -37,10 +37,9 @@ This directory contains documentation of the collectors in the windows_exporter,
- [`smtp`](collector.smtp.md) - [`smtp`](collector.smtp.md)
- [`system`](collector.system.md) - [`system`](collector.system.md)
- [`tcp`](collector.tcp.md) - [`tcp`](collector.tcp.md)
- [`teradici_pcoip`](collector.teradici_pcoip.md)
- [`terminal_services`](collector.terminal_services.md) - [`terminal_services`](collector.terminal_services.md)
- [`textfile`](collector.textfile.md) - [`textfile`](collector.textfile.md)
- [`thermalzone`](collector.thermalzone.md) - [`thermalzone`](collector.thermalzone.md)
- [`time`](collector.time.md) - [`time`](collector.time.md)
- [`update`](collector.update.md)
- [`vmware`](collector.vmware.md) - [`vmware`](collector.vmware.md)
- [`vmware_blast`](collector.vmware_blast.md)

View File

@@ -2,13 +2,11 @@
The logon collector exposes metrics detailing the active user logon sessions. The logon collector exposes metrics detailing the active user logon sessions.
||| | | |
-|- |---------------------|-----------|
Metric name prefix | `logon` | Metric name prefix | `logon` |
Classes | [`Win32_LogonSession`](https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-logonsession) | Source | Win32 API |
Enabled by default? | No | Enabled by default? | No |
> :warning: **On some deployments, this collector seems to have some memory/timeout issues**: See [#583](https://github.com/prometheus-community/windows_exporter/issues/583)
## Flags ## Flags
@@ -16,21 +14,65 @@ None
## Metrics ## Metrics
Name | Description | Type | Labels | Name | Description | Type | Labels |
-----|-------------|------|------- |-------------------------------------------|--------------------------------------------|-------|------------------------------------|
`windows_logon_logon_type` | Number of active user logon sessions | gauge | status | `windows_logon_session_logon_timestamp_seconds` | timestamp of the logon session in seconds. | gauge | `domain`, `id`, `type`, `username` |
### Example metric ### Example metric
Query the total number of interactive logon sessions Query the total number of interactive logon sessions
``` ```
windows_logon_logon_type{status="interactive"} # HELP windows_logon_session_logon_timestamp_seconds timestamp of the logon session in seconds.
# TYPE windows_logon_session_logon_timestamp_seconds gauge
windows_logon_session_logon_timestamp_seconds{domain="",id="0x0:0x8c54",type="System",username=""} 1.72876928e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x991a",type="Interactive",username="UMFD-1"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x9933",type="Interactive",username="UMFD-0"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x994a",type="Interactive",username="UMFD-0"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x999d",type="Interactive",username="UMFD-1"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf25a",type="Interactive",username="UMFD-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf290",type="Interactive",username="UMFD-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x130241",type="Network",username="vm-jok-dev$"} 1.728769625e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x24f7c9",type="Network",username="vm-jok-dev$"} 1.728770121e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x276846",type="Network",username="vm-jok-dev$"} 1.728770195e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e4",type="Service",username="vm-jok-dev$"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e7",type="System",username="vm-jok-dev$"} 1.728769279e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x71d0f",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x720a3",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x725cb",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x753d8",type="Network",username="vm-jok-dev$"} 1.728769325e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xa3913",type="Network",username="vm-jok-dev$"} 1.728769385e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xbe7f2",type="Network",username="jok"} 1.728769531e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xc76c4",type="RemoteInteractive",username="jok"} 1.728769533e+09
windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e3",type="Service",username="IUSR"} 1.728769295e+09
windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e5",type="Service",username="LOCAL SERVICE"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xae4c7",type="Service",username="MSSQLSERVER"} 1.728769425e+09
windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xb42f1",type="Service",username="SQLTELEMETRY"} 1.728769431e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfbac",type="Interactive",username="DWM-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfc72",type="Interactive",username="DWM-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdedd",type="Interactive",username="DWM-1"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdefd",type="Interactive",username="DWM-1"} 1.728769283e+09
``` ```
### Possible values for `type`
- System
- Interactive
- Network
- Batch
- Service
- Proxy
- Unlock
- NetworkCleartext
- NewCredentials
- RemoteInteractive
- CachedInteractive
- CachedRemoteInteractive
- CachedUnlock
## Useful queries ## Useful queries
Query the total number of local and remote (I.E. Terminal Services) interactive sessions. Query the total number of local and remote (I.E. Terminal Services) interactive sessions.
``` ```
windows_logon_logon_type{status=~"interactive|remote_interactive"} count(windows_logon_logon_type{type=~"Interactive|RemoteInteractive"}) by (type)
``` ```
## Alerting examples ## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_ _This collector doesn't yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -26,6 +26,7 @@ Name | Description | Type | Labels
`windows_tcp_segments_received_total` | Total segments received, including those received in error. This count includes segments received on currently established connections | counter | af `windows_tcp_segments_received_total` | Total segments received, including those received in error. This count includes segments received on currently established connections | counter | af
`windows_tcp_segments_retransmitted_total` | Total segments retransmitted. That is, segments transmitted that contain one or more previously transmitted bytes | counter | af `windows_tcp_segments_retransmitted_total` | Total segments retransmitted. That is, segments transmitted that contain one or more previously transmitted bytes | counter | af
`windows_tcp_segments_sent_total` | Total segments sent, including those on current connections, but excluding those containing *only* retransmitted bytes | counter | af `windows_tcp_segments_sent_total` | Total segments sent, including those on current connections, but excluding those containing *only* retransmitted bytes | counter | af
`windows_tcp_connections_state_count` | Number of TCP connections by state among: CLOSED, LISTENING, SYN_SENT, SYN_RECEIVED, ESTABLISHED, FIN_WAIT1, FIN_WAIT2, CLOSE_WAIT, CLOSING, LAST_ACK, TIME_WAIT, DELETE_TCB | gauge | af
### Example metric ### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_ _This collector does not yet have explained examples, we would appreciate your help adding them!_

View File

@@ -1,64 +0,0 @@
# teradici_pcoip collector
The teradici_pcoip collector exposes metrics relating to Teradici PCoIP sessions
|||
-|-
Metric name prefix | `teradici_pcoip`
Classes | `Win32_PerfRawData_TeradiciPerf_PCoIPSessionAudioStatistics`, `Win32_PerfRawData_TeradiciPerf_PCoIPSessionGeneralStatistics`,`Win32_PerfRawData_TeradiciPerf_PCoIPSessionImagingStatistics`,`Win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics`,`Win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics`
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_teradici_pcoip_audio_bytes_received_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_audio_bytes_sent_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_audio_rx_bw_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_audio_tx_bw_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_audio_tx_bw_limit_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_bytes_received_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_bytes_sent_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_packets_received_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_packets_sent_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_rx_packets_lost_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_session_duration_seconds_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_tx_packets_lost_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_imaging_active_min_quality` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_imaging_apex2800_offload` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_imaging_bytes_received_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_imaging_bytes_sent_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_imaging_decoder_capability_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_imaging_encoded_frames_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_imaging_megapixel_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_imaging_negative_acks_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_imaging_rx_bw_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_imaging_svga_devtap_frames_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_imaging_tx_bw_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_round_trip_latency_ms` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_rx_bw_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_rx_bw_peak_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_rx_packet_loss_percent` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_rx_packet_loss_percent_base` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_tx_bw_active_limit_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_tx_bw_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_tx_bw_limit_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_tx_packet_loss_percent` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_tx_packet_loss_percent_base` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_usb_bytes_received_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_usb_bytes_sent_total` | _Not yet documented_ | counter | None
`windows_teradici_pcoip_usb_rx_bw_kbit_persec` | _Not yet documented_ | gauge | None
`windows_teradici_pcoip_usb_tx_bw_kbit_persec` | _Not yet documented_ | gauge | None
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -6,7 +6,7 @@ The textfile collector exposes metrics from files written by other processes.
-|- -|-
Metric name prefix | `textfile` Metric name prefix | `textfile`
Classes | None Classes | None
Enabled by default? | Yes Enabled by default? | No
## Flags ## Flags

48
docs/collector.update.md Normal file
View File

@@ -0,0 +1,48 @@
# update collector
The update collector exposes the Windows Update service metrics. Note that the Windows Update service (`wuauserv`) must be running, otherwise metric collection will fail.
The Windows Update service is responsible for managing the installation of updates for the operating system and other Microsoft software. The service can be configured to automatically download and install updates, or to notify the user when updates are available.
| | |
|---------------------|------------------------|
| Metric name prefix | `update` |
| Data source | Windows Update service |
| Enabled by default? | No |
## Flags
### `--collector.updates.online`
Whether to search for updates online. If set to `false`, the collector will only list updates that are already found by the Windows Update service.
Set to `true` to search for updates online, which will take longer to complete.
### `--collector.updates.scrape-interval`
Define the interval of scraping Windows Update information
## Metrics
| Name | Description | Type | Labels |
|--------------------------------|-----------------------------------------------|-------|-------------------------------|
| `windows_updates_pending_info` | Expose information about a single pending update item | gauge | `category`,`severity`,`title` |
| `windows_updates_scrape_query_duration_seconds` | Duration of the last scrape query to the Windows Update API | gauge | |
| `windows_updates_scrape_timestamp_seconds` | Timestamp of the last scrape | gauge | |
### Example metrics
```
# HELP windows_updates_pending Pending Windows Updates
# TYPE windows_updates_pending gauge
windows_updates_pending{category="Drivers",severity="",title="Intel Corporation - Bluetooth - 23.60.5.10"} 1
# HELP windows_updates_scrape_query_duration_seconds Duration of the last scrape query to the Windows Update API
# TYPE windows_updates_scrape_query_duration_seconds gauge
windows_updates_scrape_query_duration_seconds 2.8161838
# HELP windows_updates_scrape_timestamp_seconds Timestamp of the last scrape
# TYPE windows_updates_scrape_timestamp_seconds gauge
windows_updates_scrape_timestamp_seconds 1.727539734e+09
```
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -1,98 +0,0 @@
# vmware_blast collector
The vmware_blast collector exposes metrics relating to VMware Blast sessions
|||
-|-
Metric name prefix | `vmware_blast`
Classes | `Win32_PerfRawData_Counters_VMwareBlastAudioCounters`,`Win32_PerfRawData_Counters_VMwareBlastCDRCounters`,`Win32_PerfRawData_Counters_VMwareBlastClipboardCounters`,`Win32_PerfRawData_Counters_VMwareBlastHTML5MMRCounters`,`Win32_PerfRawData_Counters_VMwareBlastImagingCounters`,`Win32_PerfRawData_Counters_VMwareBlastRTAVCounters`,`Win32_PerfRawData_Counters_VMwareBlastSerialPortandScannerCounters`,`Win32_PerfRawData_Counters_VMwareBlastSessionCounters`,`Win32_PerfRawData_Counters_VMwareBlastSkypeforBusinessControlCounters`,`Win32_PerfRawData_Counters_VMwareBlastThinPrintCounters`,`Win32_PerfRawData_Counters_VMwareBlastUSBCounters`,`Win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters`
Enabled by default? | No
## Flags
None
## Metrics
Some of these metrics may not be collected, depending on the installation options chosen when installing the Horizon agent
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_vmware_blast_audio_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_audio_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_audio_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_audio_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_cdr_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_cdr_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_cdr_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_cdr_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_clipboard_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_clipboard_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_clipboard_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_clipboard_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_html5_mmr_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_html5_mmr_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_html5_mmr_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_html5_mmr_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_imaging_dirty_frames_per_second` | _Not yet documented_ | gauge | None
`windows_vmware_blast_imaging_fbc_rate` | _Not yet documented_ | gauge | None
`windows_vmware_blast_imaging_frames_per_second` | _Not yet documented_ | gauge | None
`windows_vmware_blast_imaging_poll_rate` | _Not yet documented_ | gauge | None
`windows_vmware_blast_imaging_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_imaging_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_imaging_dirty_frames_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_imaging_fbc_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_imaging_frames_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_imaging_poll_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_imaging_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_imaging_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_rtav_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_rtav_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_rtav_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_rtav_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_serial_port_and_scanner_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_serial_port_and_scanner_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_serial_port_and_scanner_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_serial_port_and_scanner_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_automatic_reconnect_count_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_cumlative_received_bytes_over_tcp_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_cumlative_received_bytes_over_udp_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_cumlative_transmitted_bytes_over_tcp_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_cumlative_transmitted_bytes_over_udp_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_estimated_bandwidth_uplink` | _Not yet documented_ | gauge | None
`windows_vmware_blast_session_instantaneous_received_bytes_over_tcp_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_instantaneous_received_bytes_over_udp_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_instantaneous_transmitted_bytes_over_tcp_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_instantaneous_transmitted_bytes_over_udp_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_jitter_uplink` | _Not yet documented_ | gauge | None
`windows_vmware_blast_session_packet_loss_uplink` | _Not yet documented_ | gauge | None
`windows_vmware_blast_session_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_rtt` | _Not yet documented_ | gauge | None
`windows_vmware_blast_session_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_session_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_skype_for_business_control_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_skype_for_business_control_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_skype_for_business_control_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_skype_for_business_control_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_thinprint_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_thinprint_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_thinprint_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_thinprint_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_usb_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_usb_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_usb_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_usb_transmitted_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_windows_media_mmr_received_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_windows_media_mmr_received_packets_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_windows_media_mmr_transmitted_bytes_total` | _Not yet documented_ | counter | None
`windows_vmware_blast_windows_media_mmr_transmitted_packets_total` | _Not yet documented_ | counter | None
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -7,8 +7,7 @@ package main
//goland:noinspection GoUnsortedImport //goland:noinspection GoUnsortedImport
//nolint:gofumpt //nolint:gofumpt
import ( import (
// Its important that we do these first so that we can register with the Windows service control ASAP to avoid timeouts. "github.com/prometheus-community/windows_exporter/internal/initiate"
"github.com/prometheus-community/windows_exporter/pkg/initiate"
"context" "context"
"errors" "errors"
@@ -25,31 +24,31 @@ import (
"time" "time"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/config"
"github.com/prometheus-community/windows_exporter/internal/httphandler"
"github.com/prometheus-community/windows_exporter/internal/log"
"github.com/prometheus-community/windows_exporter/internal/log/flag"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus-community/windows_exporter/pkg/collector" "github.com/prometheus-community/windows_exporter/pkg/collector"
"github.com/prometheus-community/windows_exporter/pkg/config"
"github.com/prometheus-community/windows_exporter/pkg/httphandler"
winlog "github.com/prometheus-community/windows_exporter/pkg/log"
"github.com/prometheus-community/windows_exporter/pkg/log/flag"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
"github.com/prometheus/exporter-toolkit/web" "github.com/prometheus/exporter-toolkit/web"
webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag" webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
) )
// Mapping of priority names to uint32 values required by windows.SetPriorityClass.
var priorityStringToInt = map[string]uint32{
"realtime": windows.REALTIME_PRIORITY_CLASS,
"high": windows.HIGH_PRIORITY_CLASS,
"abovenormal": windows.ABOVE_NORMAL_PRIORITY_CLASS,
"normal": windows.NORMAL_PRIORITY_CLASS,
"belownormal": windows.BELOW_NORMAL_PRIORITY_CLASS,
"low": windows.IDLE_PRIORITY_CLASS,
}
func main() { func main() {
os.Exit(run()) exitCode := run()
// If we are running as a service, we need to signal the service control manager that we are done.
if !initiate.IsService {
os.Exit(exitCode)
}
initiate.ExitCodeCh <- exitCode
// Wait for the service control manager to signal that we are done.
<-initiate.StopCh
} }
func run() int { func run() int {
@@ -99,8 +98,8 @@ func run() int {
).Default("normal").String() ).Default("normal").String()
) )
winlogConfig := &winlog.Config{} logConfig := &log.Config{}
flag.AddFlags(app, winlogConfig) flag.AddFlags(app, logConfig)
app.Version(version.Print("windows_exporter")) app.Version(version.Print("windows_exporter"))
app.HelpFlag.Short('h') app.HelpFlag.Short('h')
@@ -119,7 +118,7 @@ func run() int {
return 1 return 1
} }
logger, err := winlog.New(winlogConfig) logger, err := log.New(logConfig)
if err != nil { if err != nil {
//nolint:sloglint // we do not have an logger yet //nolint:sloglint // we do not have an logger yet
slog.Error("failed to create logger", slog.Error("failed to create logger",
@@ -161,7 +160,7 @@ func run() int {
return 1 return 1
} }
logger, err = winlog.New(winlogConfig) logger, err = log.New(logConfig)
if err != nil { if err != nil {
//nolint:sloglint // we do not have an logger yet //nolint:sloglint // we do not have an logger yet
slog.Error("failed to create logger", slog.Error("failed to create logger",
@@ -180,21 +179,20 @@ func run() int {
return 0 return 0
} }
// Only set process priority if a non-default and valid value has been set if err = setPriorityWindows(logger, os.Getpid(), *processPriority); err != nil {
if priority, ok := priorityStringToInt[*processPriority]; ok && priority != windows.NORMAL_PRIORITY_CLASS { logger.Error("failed to set process priority",
logger.Debug("setting process priority to " + *processPriority) slog.Any("err", err),
)
if err = setPriorityWindows(os.Getpid(), priority); err != nil { return 1
logger.Error("failed to set process priority",
slog.Any("err", err),
)
return 1
}
} }
enabledCollectorList := utils.ExpandEnabledCollectors(*enabledCollectors) enabledCollectorList := utils.ExpandEnabledCollectors(*enabledCollectors)
collectors.Enable(enabledCollectorList) if err := collectors.Enable(enabledCollectorList); err != nil {
logger.Error(err.Error())
return 1
}
// Initialize collectors before loading // Initialize collectors before loading
if err = collectors.Build(logger); err != nil { if err = collectors.Build(logger); err != nil {
@@ -305,20 +303,43 @@ func printCollectorsToStdout() {
} }
func logCurrentUser(logger *slog.Logger) { func logCurrentUser(logger *slog.Logger) {
if u, err := user.Current(); err == nil { u, err := user.Current()
logger.Info("Running as " + u.Username) if err != nil {
logger.Warn("Unable to determine which user is running this exporter. More info: https://github.com/golang/go/issues/37348",
if strings.Contains(u.Username, "ContainerAdministrator") || strings.Contains(u.Username, "ContainerUser") { slog.Any("err", err),
logger.Warn("Running as a preconfigured Windows Container user. This may mean you do not have Windows HostProcess containers configured correctly and some functionality will not work as expected.") )
}
return return
} }
logger.Warn("Unable to determine which user is running this exporter. More info: https://github.com/golang/go/issues/37348") logger.Info("Running as " + u.Username)
if strings.Contains(u.Username, "ContainerAdministrator") || strings.Contains(u.Username, "ContainerUser") {
logger.Warn("Running as a preconfigured Windows Container user. This may mean you do not have Windows HostProcess containers configured correctly and some functionality will not work as expected.")
}
} }
func setPriorityWindows(pid int, priority uint32) error { // setPriorityWindows sets the priority of the current process to the specified value.
func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
// Mapping of priority names to uint32 values required by windows.SetPriorityClass.
priorityStringToInt := map[string]uint32{
"realtime": windows.REALTIME_PRIORITY_CLASS,
"high": windows.HIGH_PRIORITY_CLASS,
"abovenormal": windows.ABOVE_NORMAL_PRIORITY_CLASS,
"normal": windows.NORMAL_PRIORITY_CLASS,
"belownormal": windows.BELOW_NORMAL_PRIORITY_CLASS,
"low": windows.IDLE_PRIORITY_CLASS,
}
winPriority, ok := priorityStringToInt[priority]
// Only set process priority if a non-default and valid value has been set
if !ok || winPriority != windows.NORMAL_PRIORITY_CLASS {
return nil
}
logger.Debug("setting process priority to " + priority)
// https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights // https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
handle, err := windows.OpenProcess( handle, err := windows.OpenProcess(
windows.STANDARD_RIGHTS_REQUIRED|windows.SYNCHRONIZE|windows.SPECIFIC_RIGHTS_ALL, windows.STANDARD_RIGHTS_REQUIRED|windows.SYNCHRONIZE|windows.SPECIFIC_RIGHTS_ALL,
@@ -328,7 +349,7 @@ func setPriorityWindows(pid int, priority uint32) error {
return fmt.Errorf("failed to open own process: %w", err) return fmt.Errorf("failed to open own process: %w", err)
} }
if err = windows.SetPriorityClass(handle, priority); err != nil { if err = windows.SetPriorityClass(handle, winPriority); err != nil {
return fmt.Errorf("failed to set priority class: %w", err) return fmt.Errorf("failed to set priority class: %w", err)
} }

34
go.mod
View File

@@ -3,34 +3,36 @@ module github.com/prometheus-community/windows_exporter
go 1.23 go 1.23
require ( require (
github.com/Microsoft/hcsshim v0.12.7 github.com/Microsoft/hcsshim v0.12.9
github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/kingpin/v2 v2.4.0
github.com/bmatcuk/doublestar/v4 v4.6.1 github.com/bmatcuk/doublestar/v4 v4.7.1
github.com/dimchansky/utfbom v1.1.1 github.com/dimchansky/utfbom v1.1.1
github.com/go-ole/go-ole v1.3.0 github.com/go-ole/go-ole v1.3.0
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/prometheus/client_golang v1.20.4 github.com/prometheus/client_golang v1.20.5
github.com/prometheus/client_model v0.6.1 github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.59.2-0.20240918152650-14bac55a992f github.com/prometheus/common v0.60.1
github.com/prometheus/exporter-toolkit v0.13.0 github.com/prometheus/exporter-toolkit v0.13.1
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
github.com/yusufpapurcu/wmi v1.2.4 golang.org/x/sys v0.26.0
golang.org/x/sys v0.25.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
) )
require ( require (
github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/errdefs v0.1.0 // indirect github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/typeurl/v2 v2.2.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/jpillora/backoff v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect
github.com/klauspost/compress v1.17.10 // indirect github.com/klauspost/compress v1.17.11 // indirect
github.com/mdlayher/socket v0.5.1 // indirect github.com/mdlayher/socket v0.5.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect github.com/mdlayher/vsock v1.2.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -41,13 +43,13 @@ require (
github.com/sirupsen/logrus v1.9.3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opencensus.io v0.24.0 // indirect go.opencensus.io v0.24.0 // indirect
golang.org/x/crypto v0.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect
golang.org/x/net v0.29.0 // indirect golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.8.0 // indirect golang.org/x/sync v0.8.0 // indirect
golang.org/x/text v0.18.0 // indirect golang.org/x/text v0.19.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/grpc v1.67.0 // indirect google.golang.org/grpc v1.67.1 // indirect
google.golang.org/protobuf v1.34.2 // indirect google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
) )

88
go.sum
View File

@@ -2,16 +2,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.7 h1:MP6R1spmjxTE4EU4J3YsrTxn8CjvN9qwjTKJXldFaRg= github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
github.com/Microsoft/hcsshim v0.12.7/go.mod h1:HPbAuJ9BvQYYZbB4yEQcyGIsTP5L4yHKeO9XO149AEM= github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q=
github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -19,8 +19,12 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -32,10 +36,11 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -63,8 +68,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -83,15 +90,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.59.2-0.20240918152650-14bac55a992f h1:3okwxT2ame6iNnOMGt2bH7JISqpwGn2KoMZ2bVFBQ6I= github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
github.com/prometheus/common v0.59.2-0.20240918152650-14bac55a992f/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c= github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04=
github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0= github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -111,67 +118,80 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -181,8 +201,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -1,4 +1,4 @@
<Wix xmlns="http://wixtoolset.org/schemas/v4/wxs" xmlns:netfx="http://wixtoolset.org/schemas/v4/wxs/netfx" <Wix xmlns="http://wixtoolset.org/schemas/v4/wxs"
xmlns:util="http://wixtoolset.org/schemas/v4/wxs/util"> xmlns:util="http://wixtoolset.org/schemas/v4/wxs/util">
<Fragment> <Fragment>
<DirectoryRef Id="APPLICATIONFOLDER"> <DirectoryRef Id="APPLICATIONFOLDER">

File diff suppressed because it is too large Load Diff

View File

@@ -3,8 +3,8 @@ package ad_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/ad" "github.com/prometheus-community/windows_exporter/internal/collector/ad"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,359 @@
package ad
const (
abANRPerSec = "AB ANR/sec"
abBrowsesPerSec = "AB Browses/sec"
abClientSessions = "AB Client Sessions"
abMatchesPerSec = "AB Matches/sec"
abPropertyReadsPerSec = "AB Property Reads/sec"
abProxyLookupsPerSec = "AB Proxy Lookups/sec"
abSearchesPerSec = "AB Searches/sec"
approximateHighestDNT = "Approximate highest DNT"
atqEstimatedQueueDelay = "ATQ Estimated Queue Delay"
atqOutstandingQueuedRequests = "ATQ Outstanding Queued Requests"
_ = "ATQ Queue Latency"
atqRequestLatency = "ATQ Request Latency"
atqThreadsLDAP = "ATQ Threads LDAP"
atqThreadsOther = "ATQ Threads Other"
atqThreadsTotal = "ATQ Threads Total"
baseSearchesPerSec = "Base searches/sec"
databaseAddsPerSec = "Database adds/sec"
databaseDeletesPerSec = "Database deletes/sec"
databaseModifiesPerSec = "Database modifys/sec"
databaseRecyclesPerSec = "Database recycles/sec"
digestBindsPerSec = "Digest Binds/sec"
_ = "DirSync session throttling rate"
_ = "DirSync sessions in progress"
draHighestUSNCommittedHighPart = "DRA Highest USN Committed (High part)"
draHighestUSNCommittedLowPart = "DRA Highest USN Committed (Low part)"
draHighestUSNIssuedHighPart = "DRA Highest USN Issued (High part)"
draHighestUSNIssuedLowPart = "DRA Highest USN Issued (Low part)"
draInboundBytesCompressedBetweenSitesAfterCompressionSinceBoot = "DRA Inbound Bytes Compressed (Between Sites, After Compression) Since Boot"
draInboundBytesCompressedBetweenSitesAfterCompressionPerSec = "DRA Inbound Bytes Compressed (Between Sites, After Compression)/sec"
draInboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot = "DRA Inbound Bytes Compressed (Between Sites, Before Compression) Since Boot"
draInboundBytesCompressedBetweenSitesBeforeCompressionPerSec = "DRA Inbound Bytes Compressed (Between Sites, Before Compression)/sec"
draInboundBytesNotCompressedWithinSiteSinceBoot = "DRA Inbound Bytes Not Compressed (Within Site) Since Boot"
draInboundBytesNotCompressedWithinSitePerSec = "DRA Inbound Bytes Not Compressed (Within Site)/sec"
draInboundBytesTotalSinceBoot = "DRA Inbound Bytes Total Since Boot"
draInboundBytesTotalPerSec = "DRA Inbound Bytes Total/sec"
draInboundFullSyncObjectsRemaining = "DRA Inbound Full Sync Objects Remaining"
draInboundLinkValueUpdatesRemainingInPacket = "DRA Inbound Link Value Updates Remaining in Packet"
_ = "DRA Inbound Link Values/sec"
draInboundObjectUpdatesRemainingInPacket = "DRA Inbound Object Updates Remaining in Packet"
draInboundObjectsAppliedPerSec = "DRA Inbound Objects Applied/sec"
draInboundObjectsFilteredPerSec = "DRA Inbound Objects Filtered/sec"
draInboundObjectsPerSec = "DRA Inbound Objects/sec"
draInboundPropertiesAppliedPerSec = "DRA Inbound Properties Applied/sec"
draInboundPropertiesFilteredPerSec = "DRA Inbound Properties Filtered/sec"
draInboundPropertiesTotalPerSec = "DRA Inbound Properties Total/sec"
_ = "DRA Inbound Sync Link Deletion/sec"
draInboundTotalUpdatesRemainingInPacket = "DRA Inbound Total Updates Remaining in Packet"
draInboundValuesDNsOnlyPerSec = "DRA Inbound Values (DNs only)/sec"
draInboundValuesTotalPerSec = "DRA Inbound Values Total/sec"
_ = "DRA number of NC replication calls since boot"
_ = "DRA number of successful NC replication calls since boot"
draOutboundBytesCompressedBetweenSitesAfterCompressionSinceBoot = "DRA Outbound Bytes Compressed (Between Sites, After Compression) Since Boot"
draOutboundBytesCompressedBetweenSitesAfterCompressionPerSec = "DRA Outbound Bytes Compressed (Between Sites, After Compression)/sec"
draOutboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot = "DRA Outbound Bytes Compressed (Between Sites, Before Compression) Since Boot"
draOutboundBytesCompressedBetweenSitesBeforeCompressionPerSec = "DRA Outbound Bytes Compressed (Between Sites, Before Compression)/sec"
draOutboundBytesNotCompressedWithinSiteSinceBoot = "DRA Outbound Bytes Not Compressed (Within Site) Since Boot"
draOutboundBytesNotCompressedWithinSitePerSec = "DRA Outbound Bytes Not Compressed (Within Site)/sec"
draOutboundBytesTotalSinceBoot = "DRA Outbound Bytes Total Since Boot"
draOutboundBytesTotalPerSec = "DRA Outbound Bytes Total/sec"
draOutboundObjectsFilteredPerSec = "DRA Outbound Objects Filtered/sec"
draOutboundObjectsPerSec = "DRA Outbound Objects/sec"
draOutboundPropertiesPerSec = "DRA Outbound Properties/sec"
draOutboundValuesDNsOnlyPerSec = "DRA Outbound Values (DNs only)/sec"
draOutboundValuesTotalPerSec = "DRA Outbound Values Total/sec"
draPendingReplicationOperations = "DRA Pending Replication Operations"
draPendingReplicationSynchronizations = "DRA Pending Replication Synchronizations"
draSyncFailuresOnSchemaMismatch = "DRA Sync Failures on Schema Mismatch"
draSyncRequestsMade = "DRA Sync Requests Made"
draSyncRequestsSuccessful = "DRA Sync Requests Successful"
draThreadsGettingNCChanges = "DRA Threads Getting NC Changes"
draThreadsGettingNCChangesHoldingSemaphore = "DRA Threads Getting NC Changes Holding Semaphore"
_ = "DRA total number of Busy failures since boot"
_ = "DRA total number of MissingParent failures since boot"
_ = "DRA total number of NotEnoughAttrs/MissingObject failures since boot"
_ = "DRA total number of Preempted failures since boot"
_ = "DRA total time of applying replication package since boot"
_ = "DRA total time of NC replication calls since boot"
_ = "DRA total time of successful NC replication calls since boot"
_ = "DRA total time of successfully applying replication package since boot"
_ = "DRA total time on waiting async replication packages since boot"
_ = "DRA total time on waiting sync replication packages since boot"
dsPercentReadsFromDRA = "DS % Reads from DRA"
dsPercentReadsFromKCC = "DS % Reads from KCC"
dsPercentReadsFromLSA = "DS % Reads from LSA"
dsPercentReadsFromNSPI = "DS % Reads from NSPI"
dsPercentReadsFromNTDSAPI = "DS % Reads from NTDSAPI"
dsPercentReadsFromSAM = "DS % Reads from SAM"
dsPercentReadsOther = "DS % Reads Other"
dsPercentSearchesFromDRA = "DS % Searches from DRA"
dsPercentSearchesFromKCC = "DS % Searches from KCC"
dsPercentSearchesFromLDAP = "DS % Searches from LDAP"
dsPercentSearchesFromLSA = "DS % Searches from LSA"
dsPercentSearchesFromNSPI = "DS % Searches from NSPI"
dsPercentSearchesFromNTDSAPI = "DS % Searches from NTDSAPI"
dsPercentSearchesFromSAM = "DS % Searches from SAM"
dsPercentSearchesOther = "DS % Searches Other"
dsPercentWritesFromDRA = "DS % Writes from DRA"
dsPercentWritesFromKCC = "DS % Writes from KCC"
dsPercentWritesFromLDAP = "DS % Writes from LDAP"
dsPercentWritesFromLSA = "DS % Writes from LSA"
dsPercentWritesFromNSPI = "DS % Writes from NSPI"
dsPercentWritesFromNTDSAPI = "DS % Writes from NTDSAPI"
dsPercentWritesFromSAM = "DS % Writes from SAM"
dsPercentWritesOther = "DS % Writes Other"
dsClientBindsPerSec = "DS Client Binds/sec"
dsClientNameTranslationsPerSec = "DS Client Name Translations/sec"
dsDirectoryReadsPerSec = "DS Directory Reads/sec"
dsDirectorySearchesPerSec = "DS Directory Searches/sec"
dsDirectoryWritesPerSec = "DS Directory Writes/sec"
dsMonitorListSize = "DS Monitor List Size"
dsNameCacheHitRate = "DS Name Cache hit rate"
dsNotifyQueueSize = "DS Notify Queue Size"
dsSearchSubOperationsPerSec = "DS Search sub-operations/sec"
dsSecurityDescriptorPropagationsEvents = "DS Security Descriptor Propagations Events"
dsSecurityDescriptorPropagatorAverageExclusionTime = "DS Security Descriptor Propagator Average Exclusion Time"
dsSecurityDescriptorPropagatorRuntimeQueue = "DS Security Descriptor Propagator Runtime Queue"
dsSecurityDescriptorSubOperationsPerSec = "DS Security Descriptor sub-operations/sec"
dsServerBindsPerSec = "DS Server Binds/sec"
dsServerNameTranslationsPerSec = "DS Server Name Translations/sec"
dsThreadsInUse = "DS Threads in Use"
_ = "Error eventlogs since boot"
_ = "Error events since boot"
externalBindsPerSec = "External Binds/sec"
fastBindsPerSec = "Fast Binds/sec"
_ = "Fatal events since boot"
_ = "Info eventlogs since boot"
ldapActiveThreads = "LDAP Active Threads"
_ = "LDAP Add Operations"
_ = "LDAP Add Operations/sec"
_ = "LDAP batch slots available"
ldapBindTime = "LDAP Bind Time"
_ = "LDAP busy retries"
_ = "LDAP busy retries/sec"
ldapClientSessions = "LDAP Client Sessions"
ldapClosedConnectionsPerSec = "LDAP Closed Connections/sec"
_ = "LDAP Delete Operations"
_ = "LDAP Delete Operations/sec"
_ = "LDAP Modify DN Operations"
_ = "LDAP Modify DN Operations/sec"
_ = "LDAP Modify Operations"
_ = "LDAP Modify Operations/sec"
ldapNewConnectionsPerSec = "LDAP New Connections/sec"
ldapNewSSLConnectionsPerSec = "LDAP New SSL Connections/sec"
_ = "LDAP Outbound Bytes"
_ = "LDAP Outbound Bytes/sec"
_ = "LDAP Page Search Cache entries count"
_ = "LDAP Page Search Cache size"
ldapSearchesPerSec = "LDAP Searches/sec"
ldapSuccessfulBindsPerSec = "LDAP Successful Binds/sec"
_ = "LDAP Threads Sleeping on BUSY"
ldapUDPOperationsPerSec = "LDAP UDP operations/sec"
ldapWritesPerSec = "LDAP Writes/sec"
linkValuesCleanedPerSec = "Link Values Cleaned/sec"
_ = "Links added"
_ = "Links added/sec"
_ = "Links visited"
_ = "Links visited/sec"
_ = "Logical link deletes"
_ = "Logical link deletes/sec"
negotiatedBindsPerSec = "Negotiated Binds/sec"
ntlmBindsPerSec = "NTLM Binds/sec"
_ = "Objects returned"
_ = "Objects returned/sec"
_ = "Objects visited"
_ = "Objects visited/sec"
oneLevelSearchesPerSec = "Onelevel searches/sec"
_ = "PDC failed password update notifications"
_ = "PDC password update notifications/sec"
_ = "PDC successful password update notifications"
phantomsCleanedPerSec = "Phantoms Cleaned/sec"
phantomsVisitedPerSec = "Phantoms Visited/sec"
_ = "Physical link deletes"
_ = "Physical link deletes/sec"
_ = "Replicate Single Object operations"
_ = "Replicate Single Object operations/sec"
_ = "RID Pool invalidations since boot"
_ = "RID Pool request failures since boot"
_ = "RID Pool request successes since boot"
samAccountGroupEvaluationLatency = "SAM Account Group Evaluation Latency"
samDisplayInformationQueriesPerSec = "SAM Display Information Queries/sec"
samDomainLocalGroupMembershipEvaluationsPerSec = "SAM Domain Local Group Membership Evaluations/sec"
samEnumerationsPerSec = "SAM Enumerations/sec"
samGCEvaluationsPerSec = "SAM GC Evaluations/sec"
samGlobalGroupMembershipEvaluationsPerSec = "SAM Global Group Membership Evaluations/sec"
samMachineCreationAttemptsPerSec = "SAM Machine Creation Attempts/sec"
samMembershipChangesPerSec = "SAM Membership Changes/sec"
samNonTransitiveMembershipEvaluationsPerSec = "SAM Non-Transitive Membership Evaluations/sec"
samPasswordChangesPerSec = "SAM Password Changes/sec"
samResourceGroupEvaluationLatency = "SAM Resource Group Evaluation Latency"
samSuccessfulComputerCreationsPerSecIncludesAllRequests = "SAM Successful Computer Creations/sec: Includes all requests"
samSuccessfulUserCreationsPerSec = "SAM Successful User Creations/sec"
samTransitiveMembershipEvaluationsPerSec = "SAM Transitive Membership Evaluations/sec"
samUniversalGroupMembershipEvaluationsPerSec = "SAM Universal Group Membership Evaluations/sec"
samUserCreationAttemptsPerSec = "SAM User Creation Attempts/sec"
simpleBindsPerSec = "Simple Binds/sec"
subtreeSearchesPerSec = "Subtree searches/sec"
tombstonesGarbageCollectedPerSec = "Tombstones Garbage Collected/sec"
tombstonesVisitedPerSec = "Tombstones Visited/sec"
transitiveOperationsMillisecondsRun = "Transitive operations milliseconds run"
transitiveOperationsPerSec = "Transitive operations/sec"
transitiveSubOperationsPerSec = "Transitive suboperations/sec"
_ = "Warning eventlogs since boot"
_ = "Warning events since boot"
)
// Win32_PerfRawData_DirectoryServices_DirectoryServices docs:
// - https://msdn.microsoft.com/en-us/library/ms803980.aspx
type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
Name string
ABANRPersec uint32
ABBrowsesPersec uint32
ABClientSessions uint32
ABMatchesPersec uint32
ABPropertyReadsPersec uint32
ABProxyLookupsPersec uint32
ABSearchesPersec uint32
ApproximatehighestDNT uint32
ATQEstimatedQueueDelay uint32
ATQOutstandingQueuedRequests uint32
ATQRequestLatency uint32
ATQThreadsLDAP uint32
ATQThreadsOther uint32
ATQThreadsTotal uint32
BasesearchesPersec uint32
DatabaseaddsPersec uint32
DatabasedeletesPersec uint32
DatabasemodifysPersec uint32
DatabaserecyclesPersec uint32
DigestBindsPersec uint32
DRAHighestUSNCommittedHighpart uint64
DRAHighestUSNCommittedLowpart uint64
DRAHighestUSNIssuedHighpart uint64
DRAHighestUSNIssuedLowpart uint64
DRAInboundBytesCompressedBetweenSitesAfterCompressionPersec uint32
DRAInboundBytesCompressedBetweenSitesAfterCompressionSinceBoot uint32
DRAInboundBytesCompressedBetweenSitesBeforeCompressionPersec uint32
DRAInboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot uint32
DRAInboundBytesNotCompressedWithinSitePersec uint32
DRAInboundBytesNotCompressedWithinSiteSinceBoot uint32
DRAInboundBytesTotalPersec uint32
DRAInboundBytesTotalSinceBoot uint32
DRAInboundFullSyncObjectsRemaining uint32
DRAInboundLinkValueUpdatesRemaininginPacket uint32
DRAInboundObjectsAppliedPersec uint32
DRAInboundObjectsFilteredPersec uint32
DRAInboundObjectsPersec uint32
DRAInboundObjectUpdatesRemaininginPacket uint32
DRAInboundPropertiesAppliedPersec uint32
DRAInboundPropertiesFilteredPersec uint32
DRAInboundPropertiesTotalPersec uint32
DRAInboundTotalUpdatesRemaininginPacket uint32
DRAInboundValuesDNsonlyPersec uint32
DRAInboundValuesTotalPersec uint32
DRAOutboundBytesCompressedBetweenSitesAfterCompressionPersec uint32
DRAOutboundBytesCompressedBetweenSitesAfterCompressionSinceBoot uint32
DRAOutboundBytesCompressedBetweenSitesBeforeCompressionPersec uint32
DRAOutboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot uint32
DRAOutboundBytesNotCompressedWithinSitePersec uint32
DRAOutboundBytesNotCompressedWithinSiteSinceBoot uint32
DRAOutboundBytesTotalPersec uint32
DRAOutboundBytesTotalSinceBoot uint32
DRAOutboundObjectsFilteredPersec uint32
DRAOutboundObjectsPersec uint32
DRAOutboundPropertiesPersec uint32
DRAOutboundValuesDNsonlyPersec uint32
DRAOutboundValuesTotalPersec uint32
DRAPendingReplicationOperations uint32
DRAPendingReplicationSynchronizations uint32
DRASyncFailuresonSchemaMismatch uint32
DRASyncRequestsMade uint32
DRASyncRequestsSuccessful uint32
DRAThreadsGettingNCChanges uint32
DRAThreadsGettingNCChangesHoldingSemaphore uint32
DSClientBindsPersec uint32
DSClientNameTranslationsPersec uint32
DSDirectoryReadsPersec uint32
DSDirectorySearchesPersec uint32
DSDirectoryWritesPersec uint32
DSMonitorListSize uint32
DSNameCachehitrate uint32
DSNameCachehitrate_Base uint32
DSNotifyQueueSize uint32
DSPercentReadsfromDRA uint32
DSPercentReadsfromKCC uint32
DSPercentReadsfromLSA uint32
DSPercentReadsfromNSPI uint32
DSPercentReadsfromNTDSAPI uint32
DSPercentReadsfromSAM uint32
DSPercentReadsOther uint32
DSPercentSearchesfromDRA uint32
DSPercentSearchesfromKCC uint32
DSPercentSearchesfromLDAP uint32
DSPercentSearchesfromLSA uint32
DSPercentSearchesfromNSPI uint32
DSPercentSearchesfromNTDSAPI uint32
DSPercentSearchesfromSAM uint32
DSPercentSearchesOther uint32
DSPercentWritesfromDRA uint32
DSPercentWritesfromKCC uint32
DSPercentWritesfromLDAP uint32
DSPercentWritesfromLSA uint32
DSPercentWritesfromNSPI uint32
DSPercentWritesfromNTDSAPI uint32
DSPercentWritesfromSAM uint32
DSPercentWritesOther uint32
DSSearchsuboperationsPersec uint32
DSSecurityDescriptorPropagationsEvents uint32
DSSecurityDescriptorPropagatorAverageExclusionTime uint32
DSSecurityDescriptorPropagatorRuntimeQueue uint32
DSSecurityDescriptorsuboperationsPersec uint32
DSServerBindsPersec uint32
DSServerNameTranslationsPersec uint32
DSThreadsinUse uint32
ExternalBindsPersec uint32
FastBindsPersec uint32
LDAPActiveThreads uint32
LDAPBindTime uint32
LDAPClientSessions uint32
LDAPClosedConnectionsPersec uint32
LDAPNewConnectionsPersec uint32
LDAPNewSSLConnectionsPersec uint32
LDAPSearchesPersec uint32
LDAPSuccessfulBindsPersec uint32
LDAPUDPoperationsPersec uint32
LDAPWritesPersec uint32
LinkValuesCleanedPersec uint32
NegotiatedBindsPersec uint32
NTLMBindsPersec uint32
OnelevelsearchesPersec uint32
PhantomsCleanedPersec uint32
PhantomsVisitedPersec uint32
SAMAccountGroupEvaluationLatency uint32
SAMDisplayInformationQueriesPersec uint32
SAMDomainLocalGroupMembershipEvaluationsPersec uint32
SAMEnumerationsPersec uint32
SAMGCEvaluationsPersec uint32
SAMGlobalGroupMembershipEvaluationsPersec uint32
SAMMachineCreationAttemptsPersec uint32
SAMMembershipChangesPersec uint32
SAMNonTransitiveMembershipEvaluationsPersec uint32
SAMPasswordChangesPersec uint32
SAMResourceGroupEvaluationLatency uint32
SAMSuccessfulComputerCreationsPersecIncludesallrequests uint32
SAMSuccessfulUserCreationsPersec uint32
SAMTransitiveMembershipEvaluationsPersec uint32
SAMUniversalGroupMembershipEvaluationsPersec uint32
SAMUserCreationAttemptsPersec uint32
SimpleBindsPersec uint32
SubtreesearchesPersec uint32
TombstonesGarbageCollectedPersec uint32
TombstonesVisitedPersec uint32
Transitiveoperationsmillisecondsrun uint32
TransitiveoperationsPersec uint32
TransitivesuboperationsPersec uint32
}

View File

@@ -4,15 +4,16 @@ package adcs
import ( import (
"errors" "errors"
"fmt"
"log/slog" "log/slog"
"strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/pkg/utils" v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "adcs" const Name = "adcs"
@@ -24,6 +25,8 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
perfDataCollector perfdata.Collector
challengeResponseProcessingTime *prometheus.Desc challengeResponseProcessingTime *prometheus.Desc
challengeResponsesPerSecond *prometheus.Desc challengeResponsesPerSecond *prometheus.Desc
failedRequestsPerSecond *prometheus.Desc failedRequestsPerSecond *prometheus.Desc
@@ -60,6 +63,10 @@ func (c *Collector) GetName() string {
} }
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) { func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if utils.PDHEnabled() {
return []string{}, nil
}
return []string{"Certification Authority"}, nil return []string{"Certification Authority"}, nil
} }
@@ -67,7 +74,32 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if utils.PDHEnabled() {
counters := []string{
requestsPerSecond,
requestProcessingTime,
retrievalsPerSecond,
retrievalProcessingTime,
failedRequestsPerSecond,
issuedRequestsPerSecond,
pendingRequestsPerSecond,
requestCryptographicSigningTime,
requestPolicyModuleProcessingTime,
challengeResponsesPerSecond,
challengeResponseProcessingTime,
signedCertificateTimestampListsPerSecond,
signedCertificateTimestampListProcessingTime,
}
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Certification Authority", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create Certification Authority collector: %w", err)
}
}
c.requestsPerSecond = prometheus.NewDesc( c.requestsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_total"), prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
"Total certificate requests processed", "Total certificate requests processed",
@@ -151,6 +183,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
} }
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if utils.PDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
if err := c.collectADCSCounters(ctx, logger, ch); err != nil { if err := c.collectADCSCounters(ctx, logger, ch); err != nil {
logger.Error("failed collecting ADCS metrics", logger.Error("failed collecting ADCS metrics",
@@ -163,23 +199,6 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
return nil return nil
} }
type perflibADCS struct {
Name string
RequestsPerSecond float64 `perflib:"Requests/sec"`
RequestProcessingTime float64 `perflib:"Request processing time (ms)"`
RetrievalsPerSecond float64 `perflib:"Retrievals/sec"`
RetrievalProcessingTime float64 `perflib:"Retrieval processing time (ms)"`
FailedRequestsPerSecond float64 `perflib:"Failed Requests/sec"`
IssuedRequestsPerSecond float64 `perflib:"Issued Requests/sec"`
PendingRequestsPerSecond float64 `perflib:"Pending Requests/sec"`
RequestCryptographicSigningTime float64 `perflib:"Request cryptographic signing time (ms)"`
RequestPolicyModuleProcessingTime float64 `perflib:"Request policy module processing time (ms)"`
ChallengeResponsesPerSecond float64 `perflib:"Challenge Responses/sec"`
ChallengeResponseProcessingTime float64 `perflib:"Challenge Response processing time (ms)"`
SignedCertificateTimestampListsPerSecond float64 `perflib:"Signed Certificate Timestamp Lists/sec"`
SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"`
}
func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
dst := make([]perflibADCS, 0) dst := make([]perflibADCS, 0)
@@ -187,7 +206,7 @@ func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, logger *slog.L
return errors.New("perflib did not contain an entry for Certification Authority") return errors.New("perflib did not contain an entry for Certification Authority")
} }
err := perflib.UnmarshalObject(ctx.PerfObjects["Certification Authority"], &dst, logger) err := v1.UnmarshalObject(ctx.PerfObjects["Certification Authority"], &dst, logger)
if err != nil { if err != nil {
return err return err
} }
@@ -197,10 +216,10 @@ func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, logger *slog.L
} }
for _, d := range dst { for _, d := range dst {
n := strings.ToLower(d.Name) if d.Name == "" {
if n == "" {
continue continue
} }
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.requestsPerSecond, c.requestsPerSecond,
prometheus.CounterValue, prometheus.CounterValue,
@@ -283,3 +302,97 @@ func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, logger *slog.L
return nil return nil
} }
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect Certification Authority (ADCS) metrics: %w", err)
}
if len(perfData) == 0 {
return errors.New("perflib query for Certification Authority (ADCS) returned empty result set")
}
for name, data := range perfData {
ch <- prometheus.MustNewConstMetric(
c.requestsPerSecond,
prometheus.CounterValue,
data[requestsPerSecond].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.requestProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data[requestProcessingTime].FirstValue),
name,
)
ch <- prometheus.MustNewConstMetric(
c.retrievalsPerSecond,
prometheus.CounterValue,
data[retrievalsPerSecond].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.retrievalProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data[retrievalProcessingTime].FirstValue),
name,
)
ch <- prometheus.MustNewConstMetric(
c.failedRequestsPerSecond,
prometheus.CounterValue,
data[failedRequestsPerSecond].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.issuedRequestsPerSecond,
prometheus.CounterValue,
data[issuedRequestsPerSecond].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.pendingRequestsPerSecond,
prometheus.CounterValue,
data[pendingRequestsPerSecond].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.requestCryptographicSigningTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data[requestCryptographicSigningTime].FirstValue),
name,
)
ch <- prometheus.MustNewConstMetric(
c.requestPolicyModuleProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data[requestPolicyModuleProcessingTime].FirstValue),
name,
)
ch <- prometheus.MustNewConstMetric(
c.challengeResponsesPerSecond,
prometheus.CounterValue,
data[challengeResponsesPerSecond].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.challengeResponseProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data[challengeResponseProcessingTime].FirstValue),
name,
)
ch <- prometheus.MustNewConstMetric(
c.signedCertificateTimestampListsPerSecond,
prometheus.CounterValue,
data[signedCertificateTimestampListsPerSecond].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.signedCertificateTimestampListProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data[signedCertificateTimestampListProcessingTime].FirstValue),
name,
)
}
return nil
}

View File

@@ -3,8 +3,8 @@ package adcs_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/adcs" "github.com/prometheus-community/windows_exporter/internal/collector/adcs"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,34 @@
package adcs
const (
challengeResponseProcessingTime = "Challenge Response processing time (ms)"
challengeResponsesPerSecond = "Challenge Responses/sec"
failedRequestsPerSecond = "Failed Requests/sec"
issuedRequestsPerSecond = "Issued Requests/sec"
pendingRequestsPerSecond = "Pending Requests/sec"
requestCryptographicSigningTime = "Request cryptographic signing time (ms)"
requestPolicyModuleProcessingTime = "Request policy module processing time (ms)"
requestProcessingTime = "Request processing time (ms)"
requestsPerSecond = "Requests/sec"
retrievalProcessingTime = "Retrieval processing time (ms)"
retrievalsPerSecond = "Retrievals/sec"
signedCertificateTimestampListProcessingTime = "Signed Certificate Timestamp List processing time (ms)"
signedCertificateTimestampListsPerSecond = "Signed Certificate Timestamp Lists/sec"
)
type perflibADCS struct {
Name string
RequestsPerSecond float64 `perflib:"Requests/sec"`
RequestProcessingTime float64 `perflib:"Request processing time (ms)"`
RetrievalsPerSecond float64 `perflib:"Retrievals/sec"`
RetrievalProcessingTime float64 `perflib:"Retrieval processing time (ms)"`
FailedRequestsPerSecond float64 `perflib:"Failed Requests/sec"`
IssuedRequestsPerSecond float64 `perflib:"Issued Requests/sec"`
PendingRequestsPerSecond float64 `perflib:"Pending Requests/sec"`
RequestCryptographicSigningTime float64 `perflib:"Request cryptographic signing time (ms)"`
RequestPolicyModuleProcessingTime float64 `perflib:"Request policy module processing time (ms)"`
ChallengeResponsesPerSecond float64 `perflib:"Challenge Responses/sec"`
ChallengeResponseProcessingTime float64 `perflib:"Challenge Response processing time (ms)"`
SignedCertificateTimestampListsPerSecond float64 `perflib:"Signed Certificate Timestamp Lists/sec"`
SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"`
}

View File

@@ -3,14 +3,20 @@
package adfs package adfs
import ( import (
"errors"
"fmt"
"log/slog" "log/slog"
"maps"
"math" "math"
"slices"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "adfs" const Name = "adfs"
@@ -22,6 +28,8 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
perfDataCollector perfdata.Collector
adLoginConnectionFailures *prometheus.Desc adLoginConnectionFailures *prometheus.Desc
artifactDBFailures *prometheus.Desc artifactDBFailures *prometheus.Desc
avgArtifactDBQueryTime *prometheus.Desc avgArtifactDBQueryTime *prometheus.Desc
@@ -63,8 +71,8 @@ type Collector struct {
upAuthenticationFailures *prometheus.Desc upAuthenticationFailures *prometheus.Desc
upAuthentications *prometheus.Desc upAuthentications *prometheus.Desc
windowsIntegratedAuthentications *prometheus.Desc windowsIntegratedAuthentications *prometheus.Desc
wsfedTokenRequests *prometheus.Desc wsFedTokenRequests *prometheus.Desc
wstrustTokenRequests *prometheus.Desc wsTrustTokenRequests *prometheus.Desc
} }
func New(config *Config) *Collector { func New(config *Config) *Collector {
@@ -88,6 +96,10 @@ func (c *Collector) GetName() string {
} }
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) { func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if utils.PDHEnabled() {
return []string{}, nil
}
return []string{"AD FS"}, nil return []string{"AD FS"}, nil
} }
@@ -95,7 +107,62 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if utils.PDHEnabled() {
counters := []string{
adLoginConnectionFailures,
certificateAuthentications,
deviceAuthentications,
extranetAccountLockouts,
federatedAuthentications,
passportAuthentications,
passiveRequests,
passwordChangeFailed,
passwordChangeSucceeded,
tokenRequests,
windowsIntegratedAuthentications,
oAuthAuthZRequests,
oAuthClientAuthentications,
oAuthClientAuthenticationFailures,
oAuthClientCredentialRequestFailures,
oAuthClientCredentialRequests,
oAuthClientPrivateKeyJWTAuthenticationFailures,
oAuthClientPrivateKeyJWTAuthentications,
oAuthClientBasicAuthenticationFailures,
oAuthClientBasicAuthentications,
oAuthClientSecretPostAuthenticationFailures,
oAuthClientSecretPostAuthentications,
oAuthClientWindowsAuthenticationFailures,
oAuthClientWindowsAuthentications,
oAuthLogonCertRequestFailures,
oAuthLogonCertTokenRequests,
oAuthPasswordGrantRequestFailures,
oAuthPasswordGrantRequests,
oAuthTokenRequests,
samlPTokenRequests,
ssoAuthenticationFailures,
ssoAuthentications,
wsFedTokenRequests,
wsTrustTokenRequests,
usernamePasswordAuthenticationFailures,
usernamePasswordAuthentications,
externalAuthentications,
externalAuthNFailures,
artifactDBFailures,
avgArtifactDBQueryTime,
configDBFailures,
avgConfigDBQueryTime,
federationMetadataRequests,
}
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "AD FS", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create AD FS collector: %w", err)
}
}
c.adLoginConnectionFailures = prometheus.NewDesc( c.adLoginConnectionFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"), prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
"Total number of connection failures to an Active Directory domain controller", "Total number of connection failures to an Active Directory domain controller",
@@ -288,13 +355,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
nil, nil,
nil, nil,
) )
c.wsfedTokenRequests = prometheus.NewDesc( c.wsFedTokenRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "wsfed_token_requests_success_total"), prometheus.BuildFQName(types.Namespace, Name, "wsfed_token_requests_success_total"),
"Total number of successful RP tokens issued over WS-Fed protocol", "Total number of successful RP tokens issued over WS-Fed protocol",
nil, nil,
nil, nil,
) )
c.wstrustTokenRequests = prometheus.NewDesc( c.wsTrustTokenRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "wstrust_token_requests_success_total"), prometheus.BuildFQName(types.Namespace, Name, "wstrust_token_requests_success_total"),
"Total number of successful RP tokens issued over WS-Trust protocol", "Total number of successful RP tokens issued over WS-Trust protocol",
nil, nil,
@@ -358,58 +425,20 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
return nil return nil
} }
// perflibADFS holds one sample of the "AD FS" perflib performance object.
// Each struct tag names the raw perflib counter the field is unmarshalled
// from (see v1.UnmarshalObject in Collect).
type perflibADFS struct {
AdLoginConnectionFailures float64 `perflib:"AD Login Connection Failures"`
CertificateAuthentications float64 `perflib:"Certificate Authentications"`
DeviceAuthentications float64 `perflib:"Device Authentications"`
ExtranetAccountLockouts float64 `perflib:"Extranet Account Lockouts"`
FederatedAuthentications float64 `perflib:"Federated Authentications"`
PassportAuthentications float64 `perflib:"Microsoft Passport Authentications"`
PassiveRequests float64 `perflib:"Passive Requests"`
PasswordChangeFailed float64 `perflib:"Password Change Failed Requests"`
PasswordChangeSucceeded float64 `perflib:"Password Change Successful Requests"`
TokenRequests float64 `perflib:"Token Requests"`
WindowsIntegratedAuthentications float64 `perflib:"Windows Integrated Authentications"`
OAuthAuthZRequests float64 `perflib:"OAuth AuthZ Requests"`
OAuthClientAuthentications float64 `perflib:"OAuth Client Authentications"`
OAuthClientAuthenticationFailures float64 `perflib:"OAuth Client Authentications Failures"`
OAuthClientCredentialRequestFailures float64 `perflib:"OAuth Client Credentials Request Failures"`
OAuthClientCredentialRequests float64 `perflib:"OAuth Client Credentials Requests"`
OAuthClientPrivKeyJWTAuthnFailures float64 `perflib:"OAuth Client Private Key Jwt Authentication Failures"`
OAuthClientPrivKeyJWTAuthentications float64 `perflib:"OAuth Client Private Key Jwt Authentications"`
OAuthClientBasicAuthnFailures float64 `perflib:"OAuth Client Secret Basic Authentication Failures"`
OAuthClientBasicAuthentications float64 `perflib:"OAuth Client Secret Basic Authentication Requests"`
OAuthClientSecretPostAuthnFailures float64 `perflib:"OAuth Client Secret Post Authentication Failures"`
OAuthClientSecretPostAuthentications float64 `perflib:"OAuth Client Secret Post Authentications"`
OAuthClientWindowsAuthnFailures float64 `perflib:"OAuth Client Windows Integrated Authentication Failures"`
OAuthClientWindowsAuthentications float64 `perflib:"OAuth Client Windows Integrated Authentications"`
OAuthLogonCertRequestFailures float64 `perflib:"OAuth Logon Certificate Request Failures"`
OAuthLogonCertTokenRequests float64 `perflib:"OAuth Logon Certificate Token Requests"`
OAuthPasswordGrantRequestFailures float64 `perflib:"OAuth Password Grant Request Failures"`
OAuthPasswordGrantRequests float64 `perflib:"OAuth Password Grant Requests"`
OAuthTokenRequests float64 `perflib:"OAuth Token Requests"`
SAMLPTokenRequests float64 `perflib:"SAML-P Token Requests"`
SSOAuthenticationFailures float64 `perflib:"SSO Authentication Failures"`
SSOAuthentications float64 `perflib:"SSO Authentications"`
WSFedTokenRequests float64 `perflib:"WS-Fed Token Requests"`
WSTrustTokenRequests float64 `perflib:"WS-Trust Token Requests"`
UsernamePasswordAuthnFailures float64 `perflib:"U/P Authentication Failures"`
UsernamePasswordAuthentications float64 `perflib:"U/P Authentications"`
ExternalAuthentications float64 `perflib:"External Authentications"`
ExternalAuthNFailures float64 `perflib:"External Authentication Failures"`
ArtifactDBFailures float64 `perflib:"Artifact Database Connection Failures"`
AvgArtifactDBQueryTime float64 `perflib:"Average Artifact Database Query Time"`
ConfigDBFailures float64 `perflib:"Configuration Database Connection Failures"`
AvgConfigDBQueryTime float64 `perflib:"Average Config Database Query Time"`
FederationMetadataRequests float64 `perflib:"Federation Metadata Requests"`
}
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if utils.PDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
return c.collect(ctx, logger, ch)
}
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var adfsData []perflibADFS var adfsData []perflibADFS
err := perflib.UnmarshalObject(ctx.PerfObjects["AD FS"], &adfsData, logger) err := v1.UnmarshalObject(ctx.PerfObjects["AD FS"], &adfsData, logger)
if err != nil { if err != nil {
return err return err
} }
@@ -607,13 +636,13 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.wsfedTokenRequests, c.wsFedTokenRequests,
prometheus.CounterValue, prometheus.CounterValue,
adfsData[0].WSFedTokenRequests, adfsData[0].WSFedTokenRequests,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.wstrustTokenRequests, c.wsTrustTokenRequests,
prometheus.CounterValue, prometheus.CounterValue,
adfsData[0].WSTrustTokenRequests, adfsData[0].WSTrustTokenRequests,
) )
@@ -674,3 +703,282 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
return nil return nil
} }
// collectPDH emits all AD FS metrics from the PDH-based perfdata collector.
// The "AD FS" performance object is indexed here through the first key of
// the result map (map iteration order is random; presumably the object is
// single-instance so any key reaches the same data — TODO confirm).
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
	data, err := c.perfDataCollector.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect ADFS metrics: %w", err)
	}

	instanceKeys := slices.Collect(maps.Keys(data))
	if len(instanceKeys) == 0 {
		return errors.New("perflib query for ADFS returned empty result set")
	}

	adfsData, ok := data[instanceKeys[0]]
	if !ok {
		return errors.New("perflib query for ADFS returned empty result set")
	}

	// All AD FS metrics are exposed as counters; the two "Average ... Query
	// Time" counters are additionally scaled from 100ns units to seconds.
	for _, metric := range []struct {
		desc    *prometheus.Desc
		counter string
		scale   float64 // zero value means "emit the raw counter value"
	}{
		{desc: c.adLoginConnectionFailures, counter: adLoginConnectionFailures},
		{desc: c.certificateAuthentications, counter: certificateAuthentications},
		{desc: c.deviceAuthentications, counter: deviceAuthentications},
		{desc: c.extranetAccountLockouts, counter: extranetAccountLockouts},
		{desc: c.federatedAuthentications, counter: federatedAuthentications},
		{desc: c.passportAuthentications, counter: passportAuthentications},
		{desc: c.passiveRequests, counter: passiveRequests},
		{desc: c.passwordChangeFailed, counter: passwordChangeFailed},
		{desc: c.passwordChangeSucceeded, counter: passwordChangeSucceeded},
		{desc: c.tokenRequests, counter: tokenRequests},
		{desc: c.windowsIntegratedAuthentications, counter: windowsIntegratedAuthentications},
		{desc: c.oAuthAuthZRequests, counter: oAuthAuthZRequests},
		{desc: c.oAuthClientAuthentications, counter: oAuthClientAuthentications},
		{desc: c.oAuthClientAuthenticationsFailures, counter: oAuthClientAuthenticationFailures},
		{desc: c.oAuthClientCredentialsRequestFailures, counter: oAuthClientCredentialRequestFailures},
		{desc: c.oAuthClientCredentialsRequests, counter: oAuthClientCredentialRequests},
		{desc: c.oAuthClientPrivateKeyJwtAuthenticationFailures, counter: oAuthClientPrivateKeyJWTAuthenticationFailures},
		{desc: c.oAuthClientPrivateKeyJwtAuthentications, counter: oAuthClientPrivateKeyJWTAuthentications},
		{desc: c.oAuthClientSecretBasicAuthenticationFailures, counter: oAuthClientBasicAuthenticationFailures},
		{desc: c.oAuthClientSecretBasicAuthentications, counter: oAuthClientBasicAuthentications},
		{desc: c.oAuthClientSecretPostAuthenticationFailures, counter: oAuthClientSecretPostAuthenticationFailures},
		{desc: c.oAuthClientSecretPostAuthentications, counter: oAuthClientSecretPostAuthentications},
		{desc: c.oAuthClientWindowsIntegratedAuthenticationFailures, counter: oAuthClientWindowsAuthenticationFailures},
		{desc: c.oAuthClientWindowsIntegratedAuthentications, counter: oAuthClientWindowsAuthentications},
		{desc: c.oAuthLogonCertificateRequestFailures, counter: oAuthLogonCertRequestFailures},
		{desc: c.oAuthLogonCertificateTokenRequests, counter: oAuthLogonCertTokenRequests},
		{desc: c.oAuthPasswordGrantRequestFailures, counter: oAuthPasswordGrantRequestFailures},
		{desc: c.oAuthPasswordGrantRequests, counter: oAuthPasswordGrantRequests},
		{desc: c.oAuthTokenRequests, counter: oAuthTokenRequests},
		{desc: c.samlPTokenRequests, counter: samlPTokenRequests},
		{desc: c.ssoAuthenticationFailures, counter: ssoAuthenticationFailures},
		{desc: c.ssoAuthentications, counter: ssoAuthentications},
		{desc: c.wsFedTokenRequests, counter: wsFedTokenRequests},
		{desc: c.wsTrustTokenRequests, counter: wsTrustTokenRequests},
		{desc: c.upAuthenticationFailures, counter: usernamePasswordAuthenticationFailures},
		{desc: c.upAuthentications, counter: usernamePasswordAuthentications},
		{desc: c.externalAuthenticationFailures, counter: externalAuthNFailures},
		{desc: c.externalAuthentications, counter: externalAuthentications},
		{desc: c.artifactDBFailures, counter: artifactDBFailures},
		{desc: c.avgArtifactDBQueryTime, counter: avgArtifactDBQueryTime, scale: math.Pow(10, -8)},
		{desc: c.configDBFailures, counter: configDBFailures},
		{desc: c.avgConfigDBQueryTime, counter: avgConfigDBQueryTime, scale: math.Pow(10, -8)},
		{desc: c.federationMetadataRequests, counter: federationMetadataRequests},
	} {
		value := adfsData[metric.counter].FirstValue
		if metric.scale != 0 {
			value *= metric.scale
		}

		ch <- prometheus.MustNewConstMetric(metric.desc, prometheus.CounterValue, value)
	}

	return nil
}

View File

@@ -3,8 +3,8 @@ package adfs_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/adfs" "github.com/prometheus-community/windows_exporter/internal/collector/adfs"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,93 @@
package adfs
// Raw counter names of the "AD FS" performance object. These are the
// strings requested from perfdata.NewCollector in Build and used as the
// lookup keys into the collected result set in collectPDH.
const (
adLoginConnectionFailures = "AD Login Connection Failures"
artifactDBFailures = "Artifact Database Connection Failures"
avgArtifactDBQueryTime = "Average Artifact Database Query Time"
avgConfigDBQueryTime = "Average Config Database Query Time"
certificateAuthentications = "Certificate Authentications"
configDBFailures = "Configuration Database Connection Failures"
deviceAuthentications = "Device Authentications"
externalAuthentications = "External Authentications"
externalAuthNFailures = "External Authentication Failures"
extranetAccountLockouts = "Extranet Account Lockouts"
federatedAuthentications = "Federated Authentications"
federationMetadataRequests = "Federation Metadata Requests"
oAuthAuthZRequests = "OAuth AuthZ Requests"
oAuthClientAuthenticationFailures = "OAuth Client Authentications Failures"
oAuthClientAuthentications = "OAuth Client Authentications"
oAuthClientBasicAuthenticationFailures = "OAuth Client Secret Basic Authentication Failures"
oAuthClientBasicAuthentications = "OAuth Client Secret Basic Authentication Requests"
oAuthClientCredentialRequestFailures = "OAuth Client Credentials Request Failures"
oAuthClientCredentialRequests = "OAuth Client Credentials Requests"
oAuthClientPrivateKeyJWTAuthenticationFailures = "OAuth Client Private Key Jwt Authentication Failures"
oAuthClientPrivateKeyJWTAuthentications = "OAuth Client Private Key Jwt Authentications"
oAuthClientSecretPostAuthenticationFailures = "OAuth Client Secret Post Authentication Failures"
oAuthClientSecretPostAuthentications = "OAuth Client Secret Post Authentications"
oAuthClientWindowsAuthenticationFailures = "OAuth Client Windows Integrated Authentication Failures"
oAuthClientWindowsAuthentications = "OAuth Client Windows Integrated Authentications"
oAuthLogonCertRequestFailures = "OAuth Logon Certificate Request Failures"
oAuthLogonCertTokenRequests = "OAuth Logon Certificate Token Requests"
oAuthPasswordGrantRequestFailures = "OAuth Password Grant Request Failures"
oAuthPasswordGrantRequests = "OAuth Password Grant Requests"
oAuthTokenRequests = "OAuth Token Requests"
passiveRequests = "Passive Requests"
passportAuthentications = "Microsoft Passport Authentications"
passwordChangeFailed = "Password Change Failed Requests"
passwordChangeSucceeded = "Password Change Successful Requests"
samlPTokenRequests = "SAML-P Token Requests"
ssoAuthenticationFailures = "SSO Authentication Failures"
ssoAuthentications = "SSO Authentications"
tokenRequests = "Token Requests"
usernamePasswordAuthenticationFailures = "U/P Authentication Failures"
usernamePasswordAuthentications = "U/P Authentications"
windowsIntegratedAuthentications = "Windows Integrated Authentications"
wsFedTokenRequests = "WS-Fed Token Requests"
wsTrustTokenRequests = "WS-Trust Token Requests"
)
// perflibADFS holds one sample of the "AD FS" perflib performance object
// for the legacy (non-PDH) collection path. Each struct tag names the raw
// perflib counter the field is unmarshalled from.
type perflibADFS struct {
AdLoginConnectionFailures float64 `perflib:"AD Login Connection Failures"`
CertificateAuthentications float64 `perflib:"Certificate Authentications"`
DeviceAuthentications float64 `perflib:"Device Authentications"`
ExtranetAccountLockouts float64 `perflib:"Extranet Account Lockouts"`
FederatedAuthentications float64 `perflib:"Federated Authentications"`
PassportAuthentications float64 `perflib:"Microsoft Passport Authentications"`
PassiveRequests float64 `perflib:"Passive Requests"`
PasswordChangeFailed float64 `perflib:"Password Change Failed Requests"`
PasswordChangeSucceeded float64 `perflib:"Password Change Successful Requests"`
TokenRequests float64 `perflib:"Token Requests"`
WindowsIntegratedAuthentications float64 `perflib:"Windows Integrated Authentications"`
OAuthAuthZRequests float64 `perflib:"OAuth AuthZ Requests"`
OAuthClientAuthentications float64 `perflib:"OAuth Client Authentications"`
OAuthClientAuthenticationFailures float64 `perflib:"OAuth Client Authentications Failures"`
OAuthClientCredentialRequestFailures float64 `perflib:"OAuth Client Credentials Request Failures"`
OAuthClientCredentialRequests float64 `perflib:"OAuth Client Credentials Requests"`
OAuthClientPrivKeyJWTAuthnFailures float64 `perflib:"OAuth Client Private Key Jwt Authentication Failures"`
OAuthClientPrivKeyJWTAuthentications float64 `perflib:"OAuth Client Private Key Jwt Authentications"`
OAuthClientBasicAuthnFailures float64 `perflib:"OAuth Client Secret Basic Authentication Failures"`
OAuthClientBasicAuthentications float64 `perflib:"OAuth Client Secret Basic Authentication Requests"`
OAuthClientSecretPostAuthnFailures float64 `perflib:"OAuth Client Secret Post Authentication Failures"`
OAuthClientSecretPostAuthentications float64 `perflib:"OAuth Client Secret Post Authentications"`
OAuthClientWindowsAuthnFailures float64 `perflib:"OAuth Client Windows Integrated Authentication Failures"`
OAuthClientWindowsAuthentications float64 `perflib:"OAuth Client Windows Integrated Authentications"`
OAuthLogonCertRequestFailures float64 `perflib:"OAuth Logon Certificate Request Failures"`
OAuthLogonCertTokenRequests float64 `perflib:"OAuth Logon Certificate Token Requests"`
OAuthPasswordGrantRequestFailures float64 `perflib:"OAuth Password Grant Request Failures"`
OAuthPasswordGrantRequests float64 `perflib:"OAuth Password Grant Requests"`
OAuthTokenRequests float64 `perflib:"OAuth Token Requests"`
SAMLPTokenRequests float64 `perflib:"SAML-P Token Requests"`
SSOAuthenticationFailures float64 `perflib:"SSO Authentication Failures"`
SSOAuthentications float64 `perflib:"SSO Authentications"`
WSFedTokenRequests float64 `perflib:"WS-Fed Token Requests"`
WSTrustTokenRequests float64 `perflib:"WS-Trust Token Requests"`
UsernamePasswordAuthnFailures float64 `perflib:"U/P Authentication Failures"`
UsernamePasswordAuthentications float64 `perflib:"U/P Authentications"`
ExternalAuthentications float64 `perflib:"External Authentications"`
ExternalAuthNFailures float64 `perflib:"External Authentication Failures"`
ArtifactDBFailures float64 `perflib:"Artifact Database Connection Failures"`
AvgArtifactDBQueryTime float64 `perflib:"Average Artifact Database Query Time"`
ConfigDBFailures float64 `perflib:"Configuration Database Connection Failures"`
AvgConfigDBQueryTime float64 `perflib:"Average Config Database Query Time"`
FederationMetadataRequests float64 `perflib:"Federation Metadata Requests"`
}

View File

@@ -4,13 +4,17 @@ package cache
import ( import (
"errors" "errors"
"fmt"
"log/slog" "log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "cache" const Name = "cache"
@@ -23,6 +27,8 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
perfDataCollector perfdata.Collector
asyncCopyReadsTotal *prometheus.Desc asyncCopyReadsTotal *prometheus.Desc
asyncDataMapsTotal *prometheus.Desc asyncDataMapsTotal *prometheus.Desc
asyncFastReadsTotal *prometheus.Desc asyncFastReadsTotal *prometheus.Desc
@@ -75,6 +81,10 @@ func (c *Collector) GetName() string {
} }
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) { func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if utils.PDHEnabled() {
return []string{}, nil
}
return []string{"Cache"}, nil return []string{"Cache"}, nil
} }
@@ -82,7 +92,48 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if utils.PDHEnabled() {
counters := []string{
asyncCopyReadsTotal,
asyncDataMapsTotal,
asyncFastReadsTotal,
asyncMDLReadsTotal,
asyncPinReadsTotal,
copyReadHitsTotal,
copyReadsTotal,
dataFlushesTotal,
dataFlushPagesTotal,
dataMapHitsPercent,
dataMapPinsTotal,
dataMapsTotal,
dirtyPages,
dirtyPageThreshold,
fastReadNotPossiblesTotal,
fastReadResourceMissesTotal,
fastReadsTotal,
lazyWriteFlushesTotal,
lazyWritePagesTotal,
mdlReadHitsTotal,
mdlReadsTotal,
pinReadHitsTotal,
pinReadsTotal,
readAheadsTotal,
syncCopyReadsTotal,
syncDataMapsTotal,
syncFastReadsTotal,
syncMDLReadsTotal,
syncPinReadsTotal,
}
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Cache", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create Cache collector: %w", err)
}
}
c.asyncCopyReadsTotal = prometheus.NewDesc( c.asyncCopyReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"), prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
"(AsyncCopyReadsTotal)", "(AsyncCopyReadsTotal)",
@@ -263,6 +314,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
// Collect implements the Collector interface. // Collect implements the Collector interface.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if utils.PDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
if err := c.collect(ctx, logger, ch); err != nil { if err := c.collect(ctx, logger, ch); err != nil {
logger.Error("failed collecting cache metrics", logger.Error("failed collecting cache metrics",
@@ -275,46 +330,10 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
return nil return nil
} }
// Perflib "Cache":
// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
type perflibCache struct {
AsyncCopyReadsTotal float64 `perflib:"Async Copy Reads/sec"`
AsyncDataMapsTotal float64 `perflib:"Async Data Maps/sec"`
AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"`
AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"`
AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"`
CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"`
CopyReadsTotal float64 `perflib:"Copy Reads/sec"`
DataFlushesTotal float64 `perflib:"Data Flushes/sec"`
DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"`
DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"`
DataMapsTotal float64 `perflib:"Data Maps/sec"`
DirtyPages float64 `perflib:"Dirty Pages"`
DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"`
FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"`
FastReadsTotal float64 `perflib:"Fast Reads/sec"`
LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"`
LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"`
MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"`
MDLReadsTotal float64 `perflib:"MDL Reads/sec"`
PinReadHitsTotal float64 `perflib:"Pin Read Hits %"`
PinReadsTotal float64 `perflib:"Pin Reads/sec"`
ReadAheadsTotal float64 `perflib:"Read Aheads/sec"`
SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"`
SyncDataMapsTotal float64 `perflib:"Sync Data Maps/sec"`
SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"`
SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"`
SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"`
}
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
var dst []perflibCache // Single-instance class, array is required but will have single entry. var dst []perflibCache // Single-instance class, array is required but will have single entry.
if err := perflib.UnmarshalObject(ctx.PerfObjects["Cache"], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["Cache"], &dst, logger); err != nil {
return err return err
} }
@@ -498,3 +517,192 @@ func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
return nil return nil
} }
// collectPDH emits all Cache metrics from the PDH-based perfdata
// collector. The "Cache" performance object has no named instances, so
// its values are read from the empty-instance key of the result map.
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
	data, err := c.perfDataCollector.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect Cache metrics: %w", err)
	}

	cacheData, ok := data[perftypes.EmptyInstance]
	if !ok {
		return errors.New("perflib query for Cache returned empty result set")
	}

	// Hit-ratio style counters ("... Hits %", thresholds, page counts)
	// are gauges; everything else is a cumulative counter.
	for _, metric := range []struct {
		desc      *prometheus.Desc
		valueType prometheus.ValueType
		counter   string
	}{
		{c.asyncCopyReadsTotal, prometheus.CounterValue, asyncCopyReadsTotal},
		{c.asyncDataMapsTotal, prometheus.CounterValue, asyncDataMapsTotal},
		{c.asyncFastReadsTotal, prometheus.CounterValue, asyncFastReadsTotal},
		{c.asyncMDLReadsTotal, prometheus.CounterValue, asyncMDLReadsTotal},
		{c.asyncPinReadsTotal, prometheus.CounterValue, asyncPinReadsTotal},
		{c.copyReadHitsTotal, prometheus.GaugeValue, copyReadHitsTotal},
		{c.copyReadsTotal, prometheus.CounterValue, copyReadsTotal},
		{c.dataFlushesTotal, prometheus.CounterValue, dataFlushesTotal},
		{c.dataFlushPagesTotal, prometheus.CounterValue, dataFlushPagesTotal},
		{c.dataMapHitsPercent, prometheus.GaugeValue, dataMapHitsPercent},
		{c.dataMapPinsTotal, prometheus.CounterValue, dataMapPinsTotal},
		{c.dataMapsTotal, prometheus.CounterValue, dataMapsTotal},
		{c.dirtyPages, prometheus.GaugeValue, dirtyPages},
		{c.dirtyPageThreshold, prometheus.GaugeValue, dirtyPageThreshold},
		{c.fastReadNotPossiblesTotal, prometheus.CounterValue, fastReadNotPossiblesTotal},
		{c.fastReadResourceMissesTotal, prometheus.CounterValue, fastReadResourceMissesTotal},
		{c.fastReadsTotal, prometheus.CounterValue, fastReadsTotal},
		{c.lazyWriteFlushesTotal, prometheus.CounterValue, lazyWriteFlushesTotal},
		{c.lazyWritePagesTotal, prometheus.CounterValue, lazyWritePagesTotal},
		{c.mdlReadHitsTotal, prometheus.CounterValue, mdlReadHitsTotal},
		{c.mdlReadsTotal, prometheus.CounterValue, mdlReadsTotal},
		{c.pinReadHitsTotal, prometheus.CounterValue, pinReadHitsTotal},
		{c.pinReadsTotal, prometheus.CounterValue, pinReadsTotal},
		{c.readAheadsTotal, prometheus.CounterValue, readAheadsTotal},
		{c.syncCopyReadsTotal, prometheus.CounterValue, syncCopyReadsTotal},
		{c.syncDataMapsTotal, prometheus.CounterValue, syncDataMapsTotal},
		{c.syncFastReadsTotal, prometheus.CounterValue, syncFastReadsTotal},
		{c.syncMDLReadsTotal, prometheus.CounterValue, syncMDLReadsTotal},
		{c.syncPinReadsTotal, prometheus.CounterValue, syncPinReadsTotal},
	} {
		ch <- prometheus.MustNewConstMetric(metric.desc, metric.valueType, cacheData[metric.counter].FirstValue)
	}

	return nil
}

View File

@@ -3,8 +3,8 @@ package cache_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/cache" "github.com/prometheus-community/windows_exporter/internal/collector/cache"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

69
internal/collector/cache/const.go vendored Normal file
View File

@@ -0,0 +1,69 @@
package cache
// Perflib "Cache":
// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
//
// Raw counter names of the "Cache" performance object. These are the
// strings requested from perfdata.NewCollector in Build and used as the
// lookup keys into the collected result set in collectPDH.
const (
asyncCopyReadsTotal = "Async Copy Reads/sec"
asyncDataMapsTotal = "Async Data Maps/sec"
asyncFastReadsTotal = "Async Fast Reads/sec"
asyncMDLReadsTotal = "Async MDL Reads/sec"
asyncPinReadsTotal = "Async Pin Reads/sec"
copyReadHitsTotal = "Copy Read Hits %"
copyReadsTotal = "Copy Reads/sec"
dataFlushesTotal = "Data Flushes/sec"
dataFlushPagesTotal = "Data Flush Pages/sec"
dataMapHitsPercent = "Data Map Hits %"
dataMapPinsTotal = "Data Map Pins/sec"
dataMapsTotal = "Data Maps/sec"
dirtyPages = "Dirty Pages"
dirtyPageThreshold = "Dirty Page Threshold"
fastReadNotPossiblesTotal = "Fast Read Not Possibles/sec"
fastReadResourceMissesTotal = "Fast Read Resource Misses/sec"
fastReadsTotal = "Fast Reads/sec"
lazyWriteFlushesTotal = "Lazy Write Flushes/sec"
lazyWritePagesTotal = "Lazy Write Pages/sec"
mdlReadHitsTotal = "MDL Read Hits %"
mdlReadsTotal = "MDL Reads/sec"
pinReadHitsTotal = "Pin Read Hits %"
pinReadsTotal = "Pin Reads/sec"
readAheadsTotal = "Read Aheads/sec"
syncCopyReadsTotal = "Sync Copy Reads/sec"
syncDataMapsTotal = "Sync Data Maps/sec"
syncFastReadsTotal = "Sync Fast Reads/sec"
syncMDLReadsTotal = "Sync MDL Reads/sec"
syncPinReadsTotal = "Sync Pin Reads/sec"
)
// Perflib "Cache":
// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
//
// perflibCache holds one sample of the "Cache" perflib performance object
// for the legacy (non-PDH) collection path; this object is single-instance,
// so the unmarshalled slice carries exactly one entry. Each struct tag
// names the raw perflib counter the field is unmarshalled from.
type perflibCache struct {
AsyncCopyReadsTotal float64 `perflib:"Async Copy Reads/sec"`
AsyncDataMapsTotal float64 `perflib:"Async Data Maps/sec"`
AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"`
AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"`
AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"`
CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"`
CopyReadsTotal float64 `perflib:"Copy Reads/sec"`
DataFlushesTotal float64 `perflib:"Data Flushes/sec"`
DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"`
DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"`
DataMapsTotal float64 `perflib:"Data Maps/sec"`
DirtyPages float64 `perflib:"Dirty Pages"`
DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"`
FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"`
FastReadsTotal float64 `perflib:"Fast Reads/sec"`
LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"`
LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"`
MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"`
MDLReadsTotal float64 `perflib:"MDL Reads/sec"`
PinReadHitsTotal float64 `perflib:"Pin Read Hits %"`
PinReadsTotal float64 `perflib:"Pin Reads/sec"`
ReadAheadsTotal float64 `perflib:"Read Aheads/sec"`
SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"`
SyncDataMapsTotal float64 `perflib:"Sync Data Maps/sec"`
SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"`
SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"`
SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"`
}

View File

@@ -10,10 +10,10 @@ import (
"github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "container" const Name = "container"
@@ -86,7 +86,7 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
c.containerAvailable = prometheus.NewDesc( c.containerAvailable = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "available"), prometheus.BuildFQName(types.Namespace, Name, "available"),
"Available", "Available",
@@ -325,19 +325,19 @@ func (c *Collector) collectContainer(logger *slog.Logger, ch chan<- prometheus.M
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.runtimeTotal, c.runtimeTotal,
prometheus.CounterValue, prometheus.CounterValue,
float64(containerStats.Processor.TotalRuntime100ns)*perflib.TicksToSecondScaleFactor, float64(containerStats.Processor.TotalRuntime100ns)*perftypes.TicksToSecondScaleFactor,
containerIdWithPrefix, containerIdWithPrefix,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.runtimeUser, c.runtimeUser,
prometheus.CounterValue, prometheus.CounterValue,
float64(containerStats.Processor.RuntimeUser100ns)*perflib.TicksToSecondScaleFactor, float64(containerStats.Processor.RuntimeUser100ns)*perftypes.TicksToSecondScaleFactor,
containerIdWithPrefix, containerIdWithPrefix,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.runtimeKernel, c.runtimeKernel,
prometheus.CounterValue, prometheus.CounterValue,
float64(containerStats.Processor.RuntimeKernel100ns)*perflib.TicksToSecondScaleFactor, float64(containerStats.Processor.RuntimeKernel100ns)*perftypes.TicksToSecondScaleFactor,
containerIdWithPrefix, containerIdWithPrefix,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(

View File

@@ -3,8 +3,8 @@ package container_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/container" "github.com/prometheus-community/windows_exporter/internal/collector/container"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,57 @@
package cpu
// Processor performance counters.
//
// Counter display-name strings for the "Processor Information" performance
// object. They are passed to the perfdata collector and then used as map
// keys when reading the per-core results; the string values must match the
// PDH display names exactly.
const (
c1TimeSeconds = "% C1 Time"
c2TimeSeconds = "% C2 Time"
c3TimeSeconds = "% C3 Time"
c1TransitionsTotal = "C1 Transitions/sec"
c2TransitionsTotal = "C2 Transitions/sec"
c3TransitionsTotal = "C3 Transitions/sec"
clockInterruptsTotal = "Clock Interrupts/sec"
// NOTE(review): named differently from the other */sec counters
// (dpcQueuedPerSecond vs. a *Total suffix); renaming would touch every
// call site, so the inconsistency is left as-is.
dpcQueuedPerSecond = "DPCs Queued/sec"
dpcTimeSeconds = "% DPC Time"
idleBreakEventsTotal = "Idle Break Events/sec"
idleTimeSeconds = "% Idle Time"
interruptsTotal = "Interrupts/sec"
interruptTimeSeconds = "% Interrupt Time"
parkingStatus = "Parking Status"
performanceLimitPercent = "% Performance Limit"
priorityTimeSeconds = "% Priority Time"
privilegedTimeSeconds = "% Privileged Time"
privilegedUtilitySeconds = "% Privileged Utility"
processorFrequencyMHz = "Processor Frequency"
processorPerformance = "% Processor Performance"
processorTimeSeconds = "% Processor Time"
processorUtilityRate = "% Processor Utility"
userTimeSeconds = "% User Time"
)
// perflibProcessorInformation is the unmarshal target for one instance
// (one logical core, or the "_Total" aggregate) of the perflib
// "Processor Information" object.
//
// Fields tagged with ",secondvalue" read the second raw value of a
// two-value counter (e.g. the base/denominator of "% Processor
// Performance" and "% Processor Utility"), which the collector exposes as
// the MPerf and RTC counters respectively.
type perflibProcessorInformation struct {
Name string
C1TimeSeconds float64 `perflib:"% C1 Time"`
C2TimeSeconds float64 `perflib:"% C2 Time"`
C3TimeSeconds float64 `perflib:"% C3 Time"`
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
DPCTimeSeconds float64 `perflib:"% DPC Time"`
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
IdleTimeSeconds float64 `perflib:"% Idle Time"`
InterruptsTotal float64 `perflib:"Interrupts/sec"`
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
ParkingStatus float64 `perflib:"Parking Status"`
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
ProcessorPerformance float64 `perflib:"% Processor Performance"`
ProcessorMPerf float64 `perflib:"% Processor Performance,secondvalue"`
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
ProcessorRTC float64 `perflib:"% Processor Utility,secondvalue"`
UserTimeSeconds float64 `perflib:"% User Time"`
}

View File

@@ -8,12 +8,12 @@ import (
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perfdata" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/pkg/types" v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "cpu" const Name = "cpu"
@@ -25,7 +25,10 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
perfDataCollector *perfdata.Collector perfDataCollector perfdata.Collector
processorRTCValues map[string]utils.Counter
processorMPerfValues map[string]utils.Counter
logicalProcessors *prometheus.Desc logicalProcessors *prometheus.Desc
cStateSecondsTotal *prometheus.Desc cStateSecondsTotal *prometheus.Desc
@@ -75,37 +78,37 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if utils.PDHEnabled() { if utils.PDHEnabled() {
counters := []string{ counters := []string{
C1TimeSeconds, c1TimeSeconds,
C2TimeSeconds, c2TimeSeconds,
C3TimeSeconds, c3TimeSeconds,
C1TransitionsTotal, c1TransitionsTotal,
C2TransitionsTotal, c2TransitionsTotal,
C3TransitionsTotal, c3TransitionsTotal,
ClockInterruptsTotal, clockInterruptsTotal,
DPCsQueuedTotal, dpcQueuedPerSecond,
DPCTimeSeconds, dpcTimeSeconds,
IdleBreakEventsTotal, idleBreakEventsTotal,
IdleTimeSeconds, idleTimeSeconds,
InterruptsTotal, interruptsTotal,
InterruptTimeSeconds, interruptTimeSeconds,
ParkingStatus, parkingStatus,
PerformanceLimitPercent, performanceLimitPercent,
PriorityTimeSeconds, priorityTimeSeconds,
PrivilegedTimeSeconds, privilegedTimeSeconds,
PrivilegedUtilitySeconds, privilegedUtilitySeconds,
ProcessorFrequencyMHz, processorFrequencyMHz,
ProcessorPerformance, processorPerformance,
ProcessorTimeSeconds, processorTimeSeconds,
ProcessorUtilityRate, processorUtilityRate,
UserTimeSeconds, userTimeSeconds,
} }
var err error var err error
c.perfDataCollector, err = perfdata.NewCollector("Processor Information", []string{"*"}, counters) c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Processor Information", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err) return fmt.Errorf("failed to create Processor Information collector: %w", err)
} }
@@ -142,7 +145,6 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
[]string{"core"}, []string{"core"},
nil, nil,
) )
c.cStateSecondsTotal = prometheus.NewDesc( c.cStateSecondsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cstate_seconds_total"), prometheus.BuildFQName(types.Namespace, Name, "cstate_seconds_total"),
"Time spent in low-power idle state", "Time spent in low-power idle state",
@@ -222,52 +224,26 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
nil, nil,
) )
c.processorRTCValues = map[string]utils.Counter{}
c.processorMPerfValues = map[string]utils.Counter{}
return nil return nil
} }
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if utils.PDHEnabled() { if utils.PDHEnabled() {
return c.collectPDH(ch) return c.collectPDH(ch)
} }
return c.collectFull(ctx, logger, ch) logger = logger.With(slog.String("collector", Name))
}
type perflibProcessorInformation struct { return c.collectFull(ctx, logger, ch)
Name string
C1TimeSeconds float64 `perflib:"% C1 Time"`
C2TimeSeconds float64 `perflib:"% C2 Time"`
C3TimeSeconds float64 `perflib:"% C3 Time"`
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
DPCTimeSeconds float64 `perflib:"% DPC Time"`
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
IdleTimeSeconds float64 `perflib:"% Idle Time"`
InterruptsTotal float64 `perflib:"Interrupts/sec"`
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
ParkingStatus float64 `perflib:"Parking Status"`
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
ProcessorPerformance float64 `perflib:"% Processor Performance"`
ProcessorMPerf float64 `perflib:"% Processor Performance,secondvalue"`
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
ProcessorRTC float64 `perflib:"% Processor Utility,secondvalue"`
UserTimeSeconds float64 `perflib:"% User Time"`
} }
func (c *Collector) collectFull(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectFull(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
data := make([]perflibProcessorInformation, 0) data := make([]perflibProcessorInformation, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Processor Information"], &data, logger) err := v1.UnmarshalObject(ctx.PerfObjects["Processor Information"], &data, logger)
if err != nil { if err != nil {
return err return err
} }
@@ -281,6 +257,28 @@ func (c *Collector) collectFull(ctx *types.ScrapeContext, logger *slog.Logger, c
core := cpu.Name core := cpu.Name
var (
counterProcessorRTCValues utils.Counter
counterProcessorMPerfValues utils.Counter
ok bool
)
if counterProcessorRTCValues, ok = c.processorRTCValues[core]; ok {
counterProcessorRTCValues.AddValue(uint32(cpu.ProcessorRTC))
} else {
counterProcessorRTCValues = utils.NewCounter(uint32(cpu.ProcessorRTC))
}
c.processorRTCValues[core] = counterProcessorRTCValues
if counterProcessorMPerfValues, ok = c.processorMPerfValues[core]; ok {
counterProcessorMPerfValues.AddValue(uint32(cpu.ProcessorMPerf))
} else {
counterProcessorMPerfValues = utils.NewCounter(uint32(cpu.ProcessorMPerf))
}
c.processorMPerfValues[core] = counterProcessorMPerfValues
coreCount++ coreCount++
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
@@ -380,13 +378,13 @@ func (c *Collector) collectFull(ctx *types.ScrapeContext, logger *slog.Logger, c
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processorMPerf, c.processorMPerf,
prometheus.CounterValue, prometheus.CounterValue,
cpu.ProcessorMPerf, counterProcessorMPerfValues.Value(),
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processorRTC, c.processorRTC,
prometheus.CounterValue, prometheus.CounterValue,
cpu.ProcessorRTC, counterProcessorRTCValues.Value(),
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
@@ -423,122 +421,144 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
for core, coreData := range data { for core, coreData := range data {
coreCount++ coreCount++
var (
counterProcessorRTCValues utils.Counter
counterProcessorMPerfValues utils.Counter
ok bool
)
if counterProcessorRTCValues, ok = c.processorRTCValues[core]; ok {
counterProcessorRTCValues.AddValue(uint32(coreData[processorUtilityRate].SecondValue))
} else {
counterProcessorRTCValues = utils.NewCounter(uint32(coreData[privilegedUtilitySeconds].SecondValue))
}
c.processorRTCValues[core] = counterProcessorRTCValues
if counterProcessorMPerfValues, ok = c.processorMPerfValues[core]; ok {
counterProcessorMPerfValues.AddValue(uint32(coreData[processorPerformance].SecondValue))
} else {
counterProcessorMPerfValues = utils.NewCounter(uint32(coreData[processorPerformance].SecondValue))
}
c.processorMPerfValues[core] = counterProcessorMPerfValues
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal, c.cStateSecondsTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[C1TimeSeconds].FirstValue, coreData[c1TimeSeconds].FirstValue,
core, "c1", core, "c1",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal, c.cStateSecondsTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[C2TimeSeconds].FirstValue, coreData[c2TimeSeconds].FirstValue,
core, "c2", core, "c2",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal, c.cStateSecondsTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[C3TimeSeconds].FirstValue, coreData[c3TimeSeconds].FirstValue,
core, "c3", core, "c3",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.timeTotal, c.timeTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[IdleTimeSeconds].FirstValue, coreData[idleTimeSeconds].FirstValue,
core, "idle", core, "idle",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.timeTotal, c.timeTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[InterruptTimeSeconds].FirstValue, coreData[interruptTimeSeconds].FirstValue,
core, "interrupt", core, "interrupt",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.timeTotal, c.timeTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[DPCTimeSeconds].FirstValue, coreData[dpcTimeSeconds].FirstValue,
core, "dpc", core, "dpc",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.timeTotal, c.timeTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[PrivilegedTimeSeconds].FirstValue, coreData[privilegedTimeSeconds].FirstValue,
core, "privileged", core, "privileged",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.timeTotal, c.timeTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[UserTimeSeconds].FirstValue, coreData[userTimeSeconds].FirstValue,
core, "user", core, "user",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.interruptsTotal, c.interruptsTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[InterruptsTotal].FirstValue, coreData[interruptsTotal].FirstValue,
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dpcsTotal, c.dpcsTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[DPCsQueuedTotal].FirstValue, coreData[dpcQueuedPerSecond].FirstValue,
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.clockInterruptsTotal, c.clockInterruptsTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[ClockInterruptsTotal].FirstValue, coreData[clockInterruptsTotal].FirstValue,
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.idleBreakEventsTotal, c.idleBreakEventsTotal,
prometheus.CounterValue, prometheus.CounterValue,
coreData[IdleBreakEventsTotal].FirstValue, coreData[idleBreakEventsTotal].FirstValue,
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.parkingStatus, c.parkingStatus,
prometheus.GaugeValue, prometheus.GaugeValue,
coreData[ParkingStatus].FirstValue, coreData[parkingStatus].FirstValue,
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processorFrequencyMHz, c.processorFrequencyMHz,
prometheus.GaugeValue, prometheus.GaugeValue,
coreData[ProcessorFrequencyMHz].FirstValue, coreData[processorFrequencyMHz].FirstValue,
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processorPerformance, c.processorPerformance,
prometheus.CounterValue, prometheus.CounterValue,
coreData[ProcessorPerformance].FirstValue, coreData[processorPerformance].FirstValue,
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processorMPerf, c.processorMPerf,
prometheus.CounterValue, prometheus.CounterValue,
coreData[ProcessorPerformance].SecondValue, counterProcessorMPerfValues.Value(),
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processorRTC, c.processorRTC,
prometheus.CounterValue, prometheus.CounterValue,
coreData[ProcessorUtilityRate].SecondValue, counterProcessorRTCValues.Value(),
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processorUtility, c.processorUtility,
prometheus.CounterValue, prometheus.CounterValue,
coreData[ProcessorUtilityRate].FirstValue, coreData[processorUtilityRate].FirstValue,
core, core,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processorPrivilegedUtility, c.processorPrivilegedUtility,
prometheus.CounterValue, prometheus.CounterValue,
coreData[PrivilegedUtilitySeconds].FirstValue, coreData[privilegedUtilitySeconds].FirstValue,
core, core,
) )
} }

View File

@@ -0,0 +1,18 @@
//go:build windows
package cpu_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/cpu"
"github.com/prometheus-community/windows_exporter/internal/testutils"
)
// BenchmarkCollector benchmarks a full metric-collection cycle of the cpu
// collector via the shared testutils harness.
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, cpu.Name, cpu.NewWithFlags)
}
// TestCollector runs the shared collector smoke test against the cpu
// collector (nil means no extra collector configuration).
func TestCollector(t *testing.T) {
testutils.TestCollector(t, cpu.New, nil)
}

View File

@@ -4,19 +4,18 @@ package cpu_info
import ( import (
"errors" "errors"
"fmt"
"log/slog" "log/slog"
"strconv" "strconv"
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const ( const Name = "cpu_info"
Name = "cpu_info"
)
type Config struct{} type Config struct{}
@@ -24,9 +23,9 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for a few WMI metrics in Win32_Processor. // A Collector is a Prometheus Collector for a few WMI metrics in Win32_Processor.
type Collector struct { type Collector struct {
config Config config Config
miSession *mi.Session
wmiClient *wmi.Client miQuery mi.Query
cpuInfo *prometheus.Desc cpuInfo *prometheus.Desc
cpuCoreCount *prometheus.Desc cpuCoreCount *prometheus.Desc
@@ -65,12 +64,19 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil { if miSession == nil {
return errors.New("wmiClient or SWbemServicesClient is nil") return errors.New("miSession is nil")
} }
c.wmiClient = wmiClient miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQuery = miQuery
c.miSession = miSession
c.cpuInfo = prometheus.NewDesc( c.cpuInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, "", Name), prometheus.BuildFQName(types.Namespace, "", Name),
"Labelled CPU information as provided by Win32_Processor", "Labelled CPU information as provided by Win32_Processor",
@@ -135,18 +141,20 @@ func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
return nil return nil
} }
type win32Processor struct { type miProcessor struct {
Architecture uint32 Architecture uint32 `mi:"Architecture"`
DeviceID string DeviceID string `mi:"DeviceID"`
Description string Description string `mi:"Description"`
Family uint16 Family uint16 `mi:"Family"`
L2CacheSize uint32 L2CacheSize uint32 `mi:"L2CacheSize"`
L3CacheSize uint32 L3CacheSize uint32 `mi:"L3CacheSize"`
Name string Name string `mi:"Name"`
ThreadCount uint32 ThreadCount uint32 `mi:"ThreadCount"`
NumberOfCores uint32 NumberOfCores uint32 `mi:"NumberOfCores"`
NumberOfEnabledCore uint32 NumberOfEnabledCore uint32 `mi:"NumberOfEnabledCore"`
NumberOfLogicalProcessors uint32 NumberOfLogicalProcessors uint32 `mi:"NumberOfLogicalProcessors"`
Total int
} }
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
@@ -165,16 +173,9 @@ func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan
} }
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []win32Processor var dst []miProcessor
// We use a static query here because the provided methods in wmi.go all issue a SELECT *; if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
// This results in the time-consuming LoadPercentage field being read which seems to measure each CPU return fmt.Errorf("WMI query failed: %w", err)
// serially over a 1 second interval, so the scrape time is at least 1s * num_sockets
if err := c.wmiClient.Query("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor", &dst); err != nil {
return err
}
if len(dst) == 0 {
return errors.New("WMI query returned empty result set")
} }
// Some CPUs end up exposing trailing spaces for certain strings, so clean them up // Some CPUs end up exposing trailing spaces for certain strings, so clean them up

View File

@@ -0,0 +1,16 @@
package cpu_info_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/cpu_info"
"github.com/prometheus-community/windows_exporter/internal/testutils"
)
// BenchmarkCollector benchmarks a full metric-collection cycle of the
// cpu_info collector via the shared testutils harness.
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, cpu_info.Name, cpu_info.NewWithFlags)
}
// TestCollector runs the shared collector smoke test against the cpu_info
// collector (nil means no extra collector configuration).
func TestCollector(t *testing.T) {
testutils.TestCollector(t, cpu_info.New, nil)
}

View File

@@ -6,10 +6,10 @@ import (
"log/slog" "log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/headers/sysinfoapi" "github.com/prometheus-community/windows_exporter/internal/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "cs" const Name = "cs"
@@ -61,7 +61,7 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger.Warn("The cs collector is deprecated and will be removed in a future release. " + logger.Warn("The cs collector is deprecated and will be removed in a future release. " +
"Logical processors has been moved to cpu_info collector. " + "Logical processors has been moved to cpu_info collector. " +
"Physical memory has been moved to memory collector. " + "Physical memory has been moved to memory collector. " +

View File

@@ -3,8 +3,8 @@ package cs_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/cs" "github.com/prometheus-community/windows_exporter/internal/collector/cs"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,101 @@
package dfsr
// Counter display-name strings for the three DFSR performance objects,
// grouped by the perflib object they belong to. They are used as lookup
// keys when reading collected counter values; the string values must match
// the PDH display names exactly.
const (
// Connection Perflib: "DFS Replication Service Connections".
bytesReceivedTotal = "Total Bytes Received"
// Folder Perflib: "DFS Replicated Folder".
bandwidthSavingsUsingDFSReplicationTotal = "Bandwidth Savings Using DFS Replication"
compressedSizeOfFilesReceivedTotal = "Compressed Size of Files Received"
conflictBytesCleanedUpTotal = "Conflict Bytes Cleaned Up"
conflictBytesGeneratedTotal = "Conflict Bytes Generated"
conflictFilesCleanedUpTotal = "Conflict Files Cleaned Up"
conflictFilesGeneratedTotal = "Conflict Files Generated"
conflictFolderCleanupsCompletedTotal = "Conflict folder Cleanups Completed"
conflictSpaceInUse = "Conflict Space In Use"
deletedSpaceInUse = "Deleted Space In Use"
deletedBytesCleanedUpTotal = "Deleted Bytes Cleaned Up"
deletedBytesGeneratedTotal = "Deleted Bytes Generated"
deletedFilesCleanedUpTotal = "Deleted Files Cleaned Up"
deletedFilesGeneratedTotal = "Deleted Files Generated"
fileInstallsRetriedTotal = "File Installs Retried"
fileInstallsSucceededTotal = "File Installs Succeeded"
filesReceivedTotal = "Total Files Received"
rdcBytesReceivedTotal = "RDC Bytes Received"
rdcCompressedSizeOfFilesReceivedTotal = "RDC Compressed Size of Files Received"
rdcNumberOfFilesReceivedTotal = "RDC Number of Files Received"
rdcSizeOfFilesReceivedTotal = "RDC Size of Files Received"
sizeOfFilesReceivedTotal = "Size of Files Received"
stagingSpaceInUse = "Staging Space In Use"
stagingBytesCleanedUpTotal = "Staging Bytes Cleaned Up"
stagingBytesGeneratedTotal = "Staging Bytes Generated"
stagingFilesCleanedUpTotal = "Staging Files Cleaned Up"
stagingFilesGeneratedTotal = "Staging Files Generated"
updatesDroppedTotal = "Updates Dropped"
// Volume Perflib: "DFS Replication Service Volumes".
databaseCommitsTotal = "Database Commits"
databaseLookupsTotal = "Database Lookups"
usnJournalRecordsReadTotal = "USN Journal Records Read"
usnJournalRecordsAcceptedTotal = "USN Journal Records Accepted"
usnJournalUnreadPercentage = "USN Journal Records Unread Percentage"
)
// PerflibDFSRConnection Perflib: "DFS Replication Service Connections".
//
// Unmarshal target for one connection instance; Name identifies the
// connection. Each `perflib:"..."` tag must match the counter display name
// exactly.
type PerflibDFSRConnection struct {
Name string
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
BytesReceivedTotal float64 `perflib:"Total Bytes Received"`
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
FilesReceivedTotal float64 `perflib:"Total Files Received"`
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
RDCNumberOfFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
}
// perflibDFSRFolder Perflib: "DFS Replicated Folder".
//
// Unmarshal target for one replicated-folder instance; Name identifies the
// folder. Each `perflib:"..."` tag must match the counter display name
// exactly.
type perflibDFSRFolder struct {
Name string
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
ConflictBytesCleanedUpTotal float64 `perflib:"Conflict Bytes Cleaned Up"`
ConflictBytesGeneratedTotal float64 `perflib:"Conflict Bytes Generated"`
ConflictFilesCleanedUpTotal float64 `perflib:"Conflict Files Cleaned Up"`
ConflictFilesGeneratedTotal float64 `perflib:"Conflict Files Generated"`
ConflictFolderCleanupsCompletedTotal float64 `perflib:"Conflict folder Cleanups Completed"`
ConflictSpaceInUse float64 `perflib:"Conflict Space In Use"`
DeletedSpaceInUse float64 `perflib:"Deleted Space In Use"`
DeletedBytesCleanedUpTotal float64 `perflib:"Deleted Bytes Cleaned Up"`
DeletedBytesGeneratedTotal float64 `perflib:"Deleted Bytes Generated"`
DeletedFilesCleanedUpTotal float64 `perflib:"Deleted Files Cleaned Up"`
DeletedFilesGeneratedTotal float64 `perflib:"Deleted Files Generated"`
FileInstallsRetriedTotal float64 `perflib:"File Installs Retried"`
FileInstallsSucceededTotal float64 `perflib:"File Installs Succeeded"`
FilesReceivedTotal float64 `perflib:"Total Files Received"`
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
RDCNumberOfFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
StagingSpaceInUse float64 `perflib:"Staging Space In Use"`
StagingBytesCleanedUpTotal float64 `perflib:"Staging Bytes Cleaned Up"`
StagingBytesGeneratedTotal float64 `perflib:"Staging Bytes Generated"`
StagingFilesCleanedUpTotal float64 `perflib:"Staging Files Cleaned Up"`
StagingFilesGeneratedTotal float64 `perflib:"Staging Files Generated"`
UpdatesDroppedTotal float64 `perflib:"Updates Dropped"`
}
// perflibDFSRVolume maps the perflib object "DFS Replication Service Volumes".
// Fields are populated via their `perflib` struct tags (see collectVolume).
// All fields except USNJournalUnreadPercentage are cumulative counters;
// USNJournalUnreadPercentage is a point-in-time value exported as a gauge.
type perflibDFSRVolume struct {
	// Name is the perflib instance name (the volume), used as the value of
	// the "name" metric label.
	Name string

	DatabaseCommitsTotal           float64 `perflib:"Database Commits"`
	DatabaseLookupsTotal           float64 `perflib:"Database Lookups"`
	USNJournalRecordsReadTotal     float64 `perflib:"USN Journal Records Read"`
	USNJournalRecordsAcceptedTotal float64 `perflib:"USN Journal Records Accepted"`
	USNJournalUnreadPercentage     float64 `perflib:"USN Journal Records Unread Percentage"`
}

View File

@@ -3,15 +3,19 @@
package dfsr package dfsr
import ( import (
"errors"
"fmt"
"log/slog" "log/slog"
"slices" "slices"
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "dfsr" const Name = "dfsr"
@@ -28,6 +32,10 @@ var ConfigDefaults = Config{
type Collector struct { type Collector struct {
config Config config Config
perfDataCollectorConnection perfdata.Collector
perfDataCollectorFolder perfdata.Collector
perfDataCollectorVolume perfdata.Collector
// connection source // connection source
connectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc connectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
connectionBytesReceivedTotal *prometheus.Desc connectionBytesReceivedTotal *prometheus.Desc
@@ -36,17 +44,17 @@ type Collector struct {
connectionRDCBytesReceivedTotal *prometheus.Desc connectionRDCBytesReceivedTotal *prometheus.Desc
connectionRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc connectionRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
connectionRDCSizeOfFilesReceivedTotal *prometheus.Desc connectionRDCSizeOfFilesReceivedTotal *prometheus.Desc
connectionRDCNumberofFilesReceivedTotal *prometheus.Desc connectionRDCNumberOfFilesReceivedTotal *prometheus.Desc
connectionSizeOfFilesReceivedTotal *prometheus.Desc connectionSizeOfFilesReceivedTotal *prometheus.Desc
// folder source // folder source
folderBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc folderBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
folderCompressedSizeOfFilesReceivedTotal *prometheus.Desc folderCompressedSizeOfFilesReceivedTotal *prometheus.Desc
folderConflictBytesCleanedupTotal *prometheus.Desc folderConflictBytesCleanedUpTotal *prometheus.Desc
folderConflictBytesGeneratedTotal *prometheus.Desc folderConflictBytesGeneratedTotal *prometheus.Desc
folderConflictFilesCleanedUpTotal *prometheus.Desc folderConflictFilesCleanedUpTotal *prometheus.Desc
folderConflictFilesGeneratedTotal *prometheus.Desc folderConflictFilesGeneratedTotal *prometheus.Desc
folderConflictfolderCleanupsCompletedTotal *prometheus.Desc folderConflictFolderCleanupsCompletedTotal *prometheus.Desc
folderConflictSpaceInUse *prometheus.Desc folderConflictSpaceInUse *prometheus.Desc
folderDeletedSpaceInUse *prometheus.Desc folderDeletedSpaceInUse *prometheus.Desc
folderDeletedBytesCleanedUpTotal *prometheus.Desc folderDeletedBytesCleanedUpTotal *prometheus.Desc
@@ -58,7 +66,7 @@ type Collector struct {
folderFilesReceivedTotal *prometheus.Desc folderFilesReceivedTotal *prometheus.Desc
folderRDCBytesReceivedTotal *prometheus.Desc folderRDCBytesReceivedTotal *prometheus.Desc
folderRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc folderRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
folderRDCNumberofFilesReceivedTotal *prometheus.Desc folderRDCNumberOfFilesReceivedTotal *prometheus.Desc
folderRDCSizeOfFilesReceivedTotal *prometheus.Desc folderRDCSizeOfFilesReceivedTotal *prometheus.Desc
folderSizeOfFilesReceivedTotal *prometheus.Desc folderSizeOfFilesReceivedTotal *prometheus.Desc
folderStagingSpaceInUse *prometheus.Desc folderStagingSpaceInUse *prometheus.Desc
@@ -119,10 +127,11 @@ func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{ c := &Collector{
config: ConfigDefaults, config: ConfigDefaults,
} }
c.config.CollectorsEnabled = make([]string, 0)
var collectorsEnabled string var collectorsEnabled string
app.Flag("collectors.dfsr.sources-enabled", "Comma-separated list of DFSR Perflib sources to use."). app.Flag("collector.dfsr.sources-enabled", "Comma-separated list of DFSR Perflib sources to use.").
Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled) Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
app.Action(func(*kingpin.ParseContext) error { app.Action(func(*kingpin.ParseContext) error {
@@ -139,6 +148,10 @@ func (c *Collector) GetName() string {
} }
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) { func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if utils.PDHEnabled() {
return []string{}, nil
}
// Perflib sources are dynamic, depending on the enabled child collectors // Perflib sources are dynamic, depending on the enabled child collectors
expandedChildCollectors := slices.Compact(c.config.CollectorsEnabled) expandedChildCollectors := slices.Compact(c.config.CollectorsEnabled)
perflibDependencies := make([]string, 0, len(expandedChildCollectors)) perflibDependencies := make([]string, 0, len(expandedChildCollectors))
@@ -154,11 +167,87 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
logger.Info("dfsr collector is in an experimental state! Metrics for this collector have not been tested.") logger.Info("dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
//nolint:nestif
if utils.PDHEnabled() {
var err error
if slices.Contains(c.config.CollectorsEnabled, "connection") {
counters := []string{
bandwidthSavingsUsingDFSReplicationTotal,
bytesReceivedTotal,
compressedSizeOfFilesReceivedTotal,
filesReceivedTotal,
rdcBytesReceivedTotal,
rdcCompressedSizeOfFilesReceivedTotal,
rdcNumberOfFilesReceivedTotal,
rdcSizeOfFilesReceivedTotal,
sizeOfFilesReceivedTotal,
}
c.perfDataCollectorConnection, err = perfdata.NewCollector(perfdata.V1, "DFS Replication Connections", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "folder") {
counters := []string{
bandwidthSavingsUsingDFSReplicationTotal,
compressedSizeOfFilesReceivedTotal,
conflictBytesCleanedUpTotal,
conflictBytesGeneratedTotal,
conflictFilesCleanedUpTotal,
conflictFilesGeneratedTotal,
conflictFolderCleanupsCompletedTotal,
conflictSpaceInUse,
deletedSpaceInUse,
deletedBytesCleanedUpTotal,
deletedBytesGeneratedTotal,
deletedFilesCleanedUpTotal,
deletedFilesGeneratedTotal,
fileInstallsRetriedTotal,
fileInstallsSucceededTotal,
filesReceivedTotal,
rdcBytesReceivedTotal,
rdcCompressedSizeOfFilesReceivedTotal,
rdcNumberOfFilesReceivedTotal,
rdcSizeOfFilesReceivedTotal,
sizeOfFilesReceivedTotal,
stagingSpaceInUse,
stagingBytesCleanedUpTotal,
stagingBytesGeneratedTotal,
stagingFilesCleanedUpTotal,
stagingFilesGeneratedTotal,
updatesDroppedTotal,
}
c.perfDataCollectorFolder, err = perfdata.NewCollector(perfdata.V1, "DFS Replicated Folders", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
counters := []string{
databaseCommitsTotal,
databaseLookupsTotal,
usnJournalRecordsReadTotal,
usnJournalRecordsAcceptedTotal,
usnJournalUnreadPercentage,
}
c.perfDataCollectorVolume, err = perfdata.NewCollector(perfdata.V1, "DFS Replication Service Volumes", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
}
}
}
// connection // connection
c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc( c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"), prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
@@ -202,7 +291,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
nil, nil,
) )
c.connectionRDCNumberofFilesReceivedTotal = prometheus.NewDesc( c.connectionRDCNumberOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_rdc_received_files_total"), prometheus.BuildFQName(types.Namespace, Name, "connection_rdc_received_files_total"),
"Total number of files received using remote differential compression", "Total number of files received using remote differential compression",
[]string{"name"}, []string{"name"},
@@ -238,7 +327,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
nil, nil,
) )
c.folderConflictBytesCleanedupTotal = prometheus.NewDesc( c.folderConflictBytesCleanedUpTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_cleaned_up_bytes_total"), prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_cleaned_up_bytes_total"),
"Total size of conflict loser files and folders deleted from the Conflict and Deleted folder, in bytes", "Total size of conflict loser files and folders deleted from the Conflict and Deleted folder, in bytes",
[]string{"name"}, []string{"name"},
@@ -266,7 +355,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
nil, nil,
) )
c.folderConflictfolderCleanupsCompletedTotal = prometheus.NewDesc( c.folderConflictFolderCleanupsCompletedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_folder_cleanups_total"), prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_folder_cleanups_total"),
"Number of deletions of conflict loser files and folders in the Conflict and Deleted", "Number of deletions of conflict loser files and folders in the Conflict and Deleted",
[]string{"name"}, []string{"name"},
@@ -350,7 +439,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
nil, nil,
) )
c.folderRDCNumberofFilesReceivedTotal = prometheus.NewDesc( c.folderRDCNumberOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_rdc_received_files_total"), prometheus.BuildFQName(types.Namespace, Name, "folder_rdc_received_files_total"),
"Total number of files received with Remote Differential Compression", "Total number of files received with Remote Differential Compression",
[]string{"name"}, []string{"name"},
@@ -478,6 +567,10 @@ func (c *Collector) getDFSRChildCollectors(enabledCollectors []string) []dfsrCol
// Collect implements the Collector interface. // Collect implements the Collector interface.
// Sends metric values for each metric to the provided prometheus Metric channel. // Sends metric values for each metric to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if utils.PDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
for _, fn := range c.dfsrChildCollectors { for _, fn := range c.dfsrChildCollectors {
err := fn(ctx, logger, ch) err := fn(ctx, logger, ch)
@@ -489,27 +582,10 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
return nil return nil
} }
// PerflibDFSRConnection Perflib: "DFS Replication Service Connections".
type PerflibDFSRConnection struct {
Name string
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
BytesReceivedTotal float64 `perflib:"Total Bytes Received"`
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
FilesReceivedTotal float64 `perflib:"Total Files Received"`
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
RDCNumberofFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
}
func (c *Collector) collectConnection(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectConnection(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
var dst []PerflibDFSRConnection var dst []PerflibDFSRConnection
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replication Connections"], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["DFS Replication Connections"], &dst, logger); err != nil {
return err return err
} }
@@ -564,9 +640,9 @@ func (c *Collector) collectConnection(ctx *types.ScrapeContext, logger *slog.Log
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.connectionRDCNumberofFilesReceivedTotal, c.connectionRDCNumberOfFilesReceivedTotal,
prometheus.CounterValue, prometheus.CounterValue,
connection.RDCNumberofFilesReceivedTotal, connection.RDCNumberOfFilesReceivedTotal,
connection.Name, connection.Name,
) )
@@ -581,45 +657,10 @@ func (c *Collector) collectConnection(ctx *types.ScrapeContext, logger *slog.Log
return nil return nil
} }
// perflibDFSRFolder Perflib: "DFS Replicated Folder".
type perflibDFSRFolder struct {
Name string
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
ConflictBytesCleanedupTotal float64 `perflib:"Conflict Bytes Cleaned Up"`
ConflictBytesGeneratedTotal float64 `perflib:"Conflict Bytes Generated"`
ConflictFilesCleanedUpTotal float64 `perflib:"Conflict Files Cleaned Up"`
ConflictFilesGeneratedTotal float64 `perflib:"Conflict Files Generated"`
ConflictFolderCleanupsCompletedTotal float64 `perflib:"Conflict folder Cleanups Completed"`
ConflictSpaceInUse float64 `perflib:"Conflict Space In Use"`
DeletedSpaceInUse float64 `perflib:"Deleted Space In Use"`
DeletedBytesCleanedUpTotal float64 `perflib:"Deleted Bytes Cleaned Up"`
DeletedBytesGeneratedTotal float64 `perflib:"Deleted Bytes Generated"`
DeletedFilesCleanedUpTotal float64 `perflib:"Deleted Files Cleaned Up"`
DeletedFilesGeneratedTotal float64 `perflib:"Deleted Files Generated"`
FileInstallsRetriedTotal float64 `perflib:"File Installs Retried"`
FileInstallsSucceededTotal float64 `perflib:"File Installs Succeeded"`
FilesReceivedTotal float64 `perflib:"Total Files Received"`
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
RDCNumberofFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
StagingSpaceInUse float64 `perflib:"Staging Space In Use"`
StagingBytesCleanedUpTotal float64 `perflib:"Staging Bytes Cleaned Up"`
StagingBytesGeneratedTotal float64 `perflib:"Staging Bytes Generated"`
StagingFilesCleanedUpTotal float64 `perflib:"Staging Files Cleaned Up"`
StagingFilesGeneratedTotal float64 `perflib:"Staging Files Generated"`
UpdatesDroppedTotal float64 `perflib:"Updates Dropped"`
}
func (c *Collector) collectFolder(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectFolder(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
var dst []perflibDFSRFolder var dst []perflibDFSRFolder
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replicated Folders"], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["DFS Replicated Folders"], &dst, logger); err != nil {
return err return err
} }
@@ -639,9 +680,9 @@ func (c *Collector) collectFolder(ctx *types.ScrapeContext, logger *slog.Logger,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.folderConflictBytesCleanedupTotal, c.folderConflictBytesCleanedUpTotal,
prometheus.CounterValue, prometheus.CounterValue,
folder.ConflictBytesCleanedupTotal, folder.ConflictBytesCleanedUpTotal,
folder.Name, folder.Name,
) )
@@ -667,7 +708,7 @@ func (c *Collector) collectFolder(ctx *types.ScrapeContext, logger *slog.Logger,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.folderConflictfolderCleanupsCompletedTotal, c.folderConflictFolderCleanupsCompletedTotal,
prometheus.CounterValue, prometheus.CounterValue,
folder.ConflictFolderCleanupsCompletedTotal, folder.ConflictFolderCleanupsCompletedTotal,
folder.Name, folder.Name,
@@ -751,9 +792,9 @@ func (c *Collector) collectFolder(ctx *types.ScrapeContext, logger *slog.Logger,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.folderRDCNumberofFilesReceivedTotal, c.folderRDCNumberOfFilesReceivedTotal,
prometheus.CounterValue, prometheus.CounterValue,
folder.RDCNumberofFilesReceivedTotal, folder.RDCNumberOfFilesReceivedTotal,
folder.Name, folder.Name,
) )
@@ -817,23 +858,10 @@ func (c *Collector) collectFolder(ctx *types.ScrapeContext, logger *slog.Logger,
return nil return nil
} }
// perflibDFSRVolume Perflib: "DFS Replication Service Volumes".
type perflibDFSRVolume struct {
Name string
DatabaseCommitsTotal float64 `perflib:"Database Commits"`
DatabaseLookupsTotal float64 `perflib:"Database Lookups"`
USNJournalRecordsReadTotal float64 `perflib:"USN Journal Records Read"`
USNJournalRecordsAcceptedTotal float64 `perflib:"USN Journal Records Accepted"`
USNJournalUnreadPercentage float64 `perflib:"USN Journal Records Unread Percentage"`
}
func (c *Collector) collectVolume(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectVolume(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
var dst []perflibDFSRVolume var dst []perflibDFSRVolume
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replication Service Volumes"], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["DFS Replication Service Volumes"], &dst, logger); err != nil {
return err return err
} }
@@ -876,3 +904,353 @@ func (c *Collector) collectVolume(ctx *types.ScrapeContext, logger *slog.Logger,
return nil return nil
} }
// collectPDH gathers metrics through the PDH-based collectors, running each
// child collector (connection, folder, volume) only when it is present in
// the enabled-collectors list, and reports all failures as one joined error.
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
	subCollectors := []struct {
		name    string
		collect func(chan<- prometheus.Metric) error
	}{
		{"connection", c.collectPDHConnection},
		{"folder", c.collectPDHFolder},
		{"volume", c.collectPDHVolume},
	}

	var errs []error

	for _, sub := range subCollectors {
		if !slices.Contains(c.config.CollectorsEnabled, sub.name) {
			continue
		}

		// nil results are fine here: errors.Join discards them.
		errs = append(errs, sub.collect(ch))
	}

	return errors.Join(errs...)
}
// collectPDHConnection emits the "DFS Replication Connections" counters
// collected via PDH, one metric set per connection instance. All connection
// counters are cumulative, so every metric is published as a counter.
func (c *Collector) collectPDHConnection(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorConnection.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect DFS Replication Connections metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for DFS Replication Connections returned empty result set")
	}

	// Pair each metric descriptor with the PDH counter it is fed from; the
	// table order matches the historical emission order.
	series := []struct {
		desc    *prometheus.Desc
		counter string
	}{
		{c.connectionBandwidthSavingsUsingDFSReplicationTotal, bandwidthSavingsUsingDFSReplicationTotal},
		{c.connectionBytesReceivedTotal, bytesReceivedTotal},
		{c.connectionCompressedSizeOfFilesReceivedTotal, compressedSizeOfFilesReceivedTotal},
		{c.connectionFilesReceivedTotal, filesReceivedTotal},
		{c.connectionRDCBytesReceivedTotal, rdcBytesReceivedTotal},
		{c.connectionRDCCompressedSizeOfFilesReceivedTotal, rdcCompressedSizeOfFilesReceivedTotal},
		{c.connectionRDCSizeOfFilesReceivedTotal, rdcSizeOfFilesReceivedTotal},
		{c.connectionRDCNumberOfFilesReceivedTotal, rdcNumberOfFilesReceivedTotal},
		{c.connectionSizeOfFilesReceivedTotal, sizeOfFilesReceivedTotal},
	}

	for name, connection := range perfData {
		for _, s := range series {
			ch <- prometheus.MustNewConstMetric(
				s.desc,
				prometheus.CounterValue,
				connection[s.counter].FirstValue,
				name,
			)
		}
	}

	return nil
}
// collectPDHFolder emits the "DFS Replicated Folders" counters collected via
// PDH, one metric set per replicated-folder instance. Most counters are
// cumulative; the *SpaceInUse counters are point-in-time values and are
// therefore published as gauges.
func (c *Collector) collectPDHFolder(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorFolder.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect DFS Replicated Folders metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for DFS Replicated Folders returned empty result set")
	}

	// Pair each metric descriptor with its value type and source counter;
	// the table order matches the historical emission order.
	series := []struct {
		desc      *prometheus.Desc
		valueType prometheus.ValueType
		counter   string
	}{
		{c.folderBandwidthSavingsUsingDFSReplicationTotal, prometheus.CounterValue, bandwidthSavingsUsingDFSReplicationTotal},
		{c.folderCompressedSizeOfFilesReceivedTotal, prometheus.CounterValue, compressedSizeOfFilesReceivedTotal},
		{c.folderConflictBytesCleanedUpTotal, prometheus.CounterValue, conflictBytesCleanedUpTotal},
		{c.folderConflictBytesGeneratedTotal, prometheus.CounterValue, conflictBytesGeneratedTotal},
		{c.folderConflictFilesCleanedUpTotal, prometheus.CounterValue, conflictFilesCleanedUpTotal},
		{c.folderConflictFilesGeneratedTotal, prometheus.CounterValue, conflictFilesGeneratedTotal},
		{c.folderConflictFolderCleanupsCompletedTotal, prometheus.CounterValue, conflictFolderCleanupsCompletedTotal},
		{c.folderConflictSpaceInUse, prometheus.GaugeValue, conflictSpaceInUse},
		{c.folderDeletedSpaceInUse, prometheus.GaugeValue, deletedSpaceInUse},
		{c.folderDeletedBytesCleanedUpTotal, prometheus.CounterValue, deletedBytesCleanedUpTotal},
		{c.folderDeletedBytesGeneratedTotal, prometheus.CounterValue, deletedBytesGeneratedTotal},
		{c.folderDeletedFilesCleanedUpTotal, prometheus.CounterValue, deletedFilesCleanedUpTotal},
		{c.folderDeletedFilesGeneratedTotal, prometheus.CounterValue, deletedFilesGeneratedTotal},
		{c.folderFileInstallsRetriedTotal, prometheus.CounterValue, fileInstallsRetriedTotal},
		{c.folderFileInstallsSucceededTotal, prometheus.CounterValue, fileInstallsSucceededTotal},
		{c.folderFilesReceivedTotal, prometheus.CounterValue, filesReceivedTotal},
		{c.folderRDCBytesReceivedTotal, prometheus.CounterValue, rdcBytesReceivedTotal},
		{c.folderRDCCompressedSizeOfFilesReceivedTotal, prometheus.CounterValue, rdcCompressedSizeOfFilesReceivedTotal},
		{c.folderRDCNumberOfFilesReceivedTotal, prometheus.CounterValue, rdcNumberOfFilesReceivedTotal},
		{c.folderRDCSizeOfFilesReceivedTotal, prometheus.CounterValue, rdcSizeOfFilesReceivedTotal},
		{c.folderSizeOfFilesReceivedTotal, prometheus.CounterValue, sizeOfFilesReceivedTotal},
		{c.folderStagingSpaceInUse, prometheus.GaugeValue, stagingSpaceInUse},
		{c.folderStagingBytesCleanedUpTotal, prometheus.CounterValue, stagingBytesCleanedUpTotal},
		{c.folderStagingBytesGeneratedTotal, prometheus.CounterValue, stagingBytesGeneratedTotal},
		{c.folderStagingFilesCleanedUpTotal, prometheus.CounterValue, stagingFilesCleanedUpTotal},
		{c.folderStagingFilesGeneratedTotal, prometheus.CounterValue, stagingFilesGeneratedTotal},
		{c.folderUpdatesDroppedTotal, prometheus.CounterValue, updatesDroppedTotal},
	}

	for name, folder := range perfData {
		for _, s := range series {
			ch <- prometheus.MustNewConstMetric(
				s.desc,
				s.valueType,
				folder[s.counter].FirstValue,
				name,
			)
		}
	}

	return nil
}
// collectPDHVolume emits the "DFS Replication Service Volumes" counters
// collected via PDH, one metric set per volume instance. The USN-journal
// unread percentage is a point-in-time value and is published as a gauge;
// the remaining metrics are counters.
func (c *Collector) collectPDHVolume(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorVolume.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect DFS Replication Volumes metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for DFS Replication Volumes returned empty result set")
	}

	// Pair each metric descriptor with its value type and source counter;
	// the table order matches the historical emission order.
	series := []struct {
		desc      *prometheus.Desc
		valueType prometheus.ValueType
		counter   string
	}{
		{c.volumeDatabaseLookupsTotal, prometheus.CounterValue, databaseLookupsTotal},
		{c.volumeDatabaseCommitsTotal, prometheus.CounterValue, databaseCommitsTotal},
		{c.volumeUSNJournalRecordsAcceptedTotal, prometheus.CounterValue, usnJournalRecordsAcceptedTotal},
		{c.volumeUSNJournalRecordsReadTotal, prometheus.CounterValue, usnJournalRecordsReadTotal},
		{c.volumeUSNJournalUnreadPercentage, prometheus.GaugeValue, usnJournalUnreadPercentage},
	}

	for name, volume := range perfData {
		for _, s := range series {
			ch <- prometheus.MustNewConstMetric(
				s.desc,
				s.valueType,
				volume[s.counter].FirstValue,
				name,
			)
		}
	}

	return nil
}

View File

@@ -3,8 +3,8 @@ package dfsr_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/dfsr" "github.com/prometheus-community/windows_exporter/internal/collector/dfsr"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,60 @@
package dhcp
// Counter names of the perflib/PDH "DHCP Server" object.
//
// While several names suggest rates ("…/sec"), the provider exposes them as
// raw cumulative counters, hence the "Total" suffix on the Go identifiers.
const (
	acksTotal                = "Acks/sec"
	activeQueueLength        = "Active Queue Length"
	conflictCheckQueueLength = "Conflict Check Queue Length"
	declinesTotal            = "Declines/sec"
	deniedDueToMatch         = "Denied due to match."
	// BUGFIX: previously duplicated "Denied due to match.", which made the
	// non-match metric query the same counter as deniedDueToMatch. The DHCP
	// Server object publishes this counter as "Denied due to nonmatch.".
	deniedDueToNonMatch                              = "Denied due to nonmatch."
	discoversTotal                                   = "Discovers/sec"
	duplicatesDroppedTotal                           = "Duplicates Dropped/sec"
	failoverBndAckReceivedTotal                      = "Failover: BndAck received/sec."
	failoverBndAckSentTotal                          = "Failover: BndAck sent/sec."
	failoverBndUpdDropped                            = "Failover: BndUpd Dropped."
	failoverBndUpdPendingOutboundQueue               = "Failover: BndUpd pending in outbound queue."
	failoverBndUpdReceivedTotal                      = "Failover: BndUpd received/sec."
	failoverBndUpdSentTotal                          = "Failover: BndUpd sent/sec."
	failoverTransitionsCommunicationInterruptedState = "Failover: Transitions to COMMUNICATION-INTERRUPTED state."
	failoverTransitionsPartnerDownState              = "Failover: Transitions to PARTNER-DOWN state."
	failoverTransitionsRecoverState                  = "Failover: Transitions to RECOVER state."
	informsTotal                                     = "Informs/sec"
	nacksTotal                                       = "Nacks/sec"
	offerQueueLength                                 = "Offer Queue Length"
	offersTotal                                      = "Offers/sec"
	packetsExpiredTotal                              = "Packets Expired/sec"
	packetsReceivedTotal                             = "Packets Received/sec"
	releasesTotal                                    = "Releases/sec"
	requestsTotal                                    = "Requests/sec"
)
// represents perflib metrics from the DHCP Server class.
// While the name of a number of perflib metrics would indicate a rate is being returned (E.G. Packets Received/sec),
// perflib instead returns a counter, hence the "Total" suffix in some of the variable names.
type dhcpPerf struct {
AcksTotal float64 `perflib:"Acks/sec"`
ActiveQueueLength float64 `perflib:"Active Queue Length"`
ConflictCheckQueueLength float64 `perflib:"Conflict Check Queue Length"`
DeclinesTotal float64 `perflib:"Declines/sec"`
DeniedDueToMatch float64 `perflib:"Denied due to match."`
DeniedDueToNonMatch float64 `perflib:"Denied due to match."`
DiscoversTotal float64 `perflib:"Discovers/sec"`
DuplicatesDroppedTotal float64 `perflib:"Duplicates Dropped/sec"`
FailoverBndAckReceivedTotal float64 `perflib:"Failover: BndAck received/sec."`
FailoverBndAckSentTotal float64 `perflib:"Failover: BndAck sent/sec."`
FailoverBndUpdDropped float64 `perflib:"Failover: BndUpd Dropped."`
FailoverBndUpdPendingOutboundQueue float64 `perflib:"Failover: BndUpd pending in outbound queue."`
FailoverBndUpdReceivedTotal float64 `perflib:"Failover: BndUpd received/sec."`
FailoverBndUpdSentTotal float64 `perflib:"Failover: BndUpd sent/sec."`
FailoverTransitionsCommunicationInterruptedState float64 `perflib:"Failover: Transitions to COMMUNICATION-INTERRUPTED state."`
FailoverTransitionsPartnerDownState float64 `perflib:"Failover: Transitions to PARTNER-DOWN state."`
FailoverTransitionsRecoverState float64 `perflib:"Failover: Transitions to RECOVER state."`
InformsTotal float64 `perflib:"Informs/sec"`
NacksTotal float64 `perflib:"Nacks/sec"`
OfferQueueLength float64 `perflib:"Offer Queue Length"`
OffersTotal float64 `perflib:"Offers/sec"`
PacketsExpiredTotal float64 `perflib:"Packets Expired/sec"`
PacketsReceivedTotal float64 `perflib:"Packets Received/sec"`
ReleasesTotal float64 `perflib:"Releases/sec"`
RequestsTotal float64 `perflib:"Requests/sec"`
}

View File

@@ -3,13 +3,18 @@
package dhcp package dhcp
import ( import (
"errors"
"fmt"
"log/slog" "log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "dhcp" const Name = "dhcp"
@@ -22,6 +27,8 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
perfDataCollector perfdata.Collector
acksTotal *prometheus.Desc acksTotal *prometheus.Desc
activeQueueLength *prometheus.Desc activeQueueLength *prometheus.Desc
conflictCheckQueueLength *prometheus.Desc conflictCheckQueueLength *prometheus.Desc
@@ -30,12 +37,12 @@ type Collector struct {
deniedDueToNonMatch *prometheus.Desc deniedDueToNonMatch *prometheus.Desc
discoversTotal *prometheus.Desc discoversTotal *prometheus.Desc
duplicatesDroppedTotal *prometheus.Desc duplicatesDroppedTotal *prometheus.Desc
failoverBndackReceivedTotal *prometheus.Desc failoverBndAckReceivedTotal *prometheus.Desc
failoverBndackSentTotal *prometheus.Desc failoverBndAckSentTotal *prometheus.Desc
failoverBndupdDropped *prometheus.Desc failoverBndUpdDropped *prometheus.Desc
failoverBndupdPendingOutboundQueue *prometheus.Desc failoverBndUpdPendingOutboundQueue *prometheus.Desc
failoverBndupdReceivedTotal *prometheus.Desc failoverBndUpdReceivedTotal *prometheus.Desc
failoverBndupdSentTotal *prometheus.Desc failoverBndUpdSentTotal *prometheus.Desc
failoverTransitionsCommunicationInterruptedState *prometheus.Desc failoverTransitionsCommunicationInterruptedState *prometheus.Desc
failoverTransitionsPartnerDownState *prometheus.Desc failoverTransitionsPartnerDownState *prometheus.Desc
failoverTransitionsRecoverState *prometheus.Desc failoverTransitionsRecoverState *prometheus.Desc
@@ -70,6 +77,10 @@ func (c *Collector) GetName() string {
} }
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) { func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if utils.PDHEnabled() {
return []string{}, nil
}
return []string{"DHCP Server"}, nil return []string{"DHCP Server"}, nil
} }
@@ -77,7 +88,44 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if utils.PDHEnabled() {
counters := []string{
acksTotal,
activeQueueLength,
conflictCheckQueueLength,
declinesTotal,
deniedDueToMatch,
deniedDueToNonMatch,
discoversTotal,
duplicatesDroppedTotal,
failoverBndAckReceivedTotal,
failoverBndAckSentTotal,
failoverBndUpdDropped,
failoverBndUpdPendingOutboundQueue,
failoverBndUpdReceivedTotal,
failoverBndUpdSentTotal,
failoverTransitionsCommunicationInterruptedState,
failoverTransitionsPartnerDownState,
failoverTransitionsRecoverState,
informsTotal,
nacksTotal,
offerQueueLength,
offersTotal,
packetsExpiredTotal,
packetsReceivedTotal,
releasesTotal,
requestsTotal,
}
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "DHCP Server", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create DHCP Server collector: %w", err)
}
}
c.packetsReceivedTotal = prometheus.NewDesc( c.packetsReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"), prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
"Total number of packets received by the DHCP server (PacketsReceivedTotal)", "Total number of packets received by the DHCP server (PacketsReceivedTotal)",
@@ -174,31 +222,31 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
nil, nil,
nil, nil,
) )
c.failoverBndupdSentTotal = prometheus.NewDesc( c.failoverBndUpdSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_sent_total"), prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_sent_total"),
"Number of DHCP fail over Binding Update messages sent (FailoverBndupdSentTotal)", "Number of DHCP fail over Binding Update messages sent (FailoverBndupdSentTotal)",
nil, nil,
nil, nil,
) )
c.failoverBndupdReceivedTotal = prometheus.NewDesc( c.failoverBndUpdReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_received_total"), prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_received_total"),
"Number of DHCP fail over Binding Update messages received (FailoverBndupdReceivedTotal)", "Number of DHCP fail over Binding Update messages received (FailoverBndupdReceivedTotal)",
nil, nil,
nil, nil,
) )
c.failoverBndackSentTotal = prometheus.NewDesc( c.failoverBndAckSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_sent_total"), prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_sent_total"),
"Number of DHCP fail over Binding Ack messages sent (FailoverBndackSentTotal)", "Number of DHCP fail over Binding Ack messages sent (FailoverBndackSentTotal)",
nil, nil,
nil, nil,
) )
c.failoverBndackReceivedTotal = prometheus.NewDesc( c.failoverBndAckReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_received_total"), prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_received_total"),
"Number of DHCP fail over Binding Ack messages received (FailoverBndackReceivedTotal)", "Number of DHCP fail over Binding Ack messages received (FailoverBndackReceivedTotal)",
nil, nil,
nil, nil,
) )
c.failoverBndupdPendingOutboundQueue = prometheus.NewDesc( c.failoverBndUpdPendingOutboundQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_pending_in_outbound_queue"), prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_pending_in_outbound_queue"),
"Number of pending outbound DHCP fail over Binding Update messages (FailoverBndupdPendingOutboundQueue)", "Number of pending outbound DHCP fail over Binding Update messages (FailoverBndupdPendingOutboundQueue)",
nil, nil,
@@ -222,7 +270,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
nil, nil,
nil, nil,
) )
c.failoverBndupdDropped = prometheus.NewDesc( c.failoverBndUpdDropped = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_dropped_total"), prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_dropped_total"),
"Total number of DHCP fail over Binding Updates dropped (FailoverBndupdDropped)", "Total number of DHCP fail over Binding Updates dropped (FailoverBndupdDropped)",
nil, nil,
@@ -232,43 +280,20 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
return nil return nil
} }
// represents perflib metrics from the DHCP Server class.
// While the name of a number of perflib metrics would indicate a rate is being returned (E.G. Packets Received/sec),
// perflib instead returns a counter, hence the "Total" suffix in some of the variable names.
type dhcpPerf struct {
PacketsReceivedTotal float64 `perflib:"Packets Received/sec"`
DuplicatesDroppedTotal float64 `perflib:"Duplicates Dropped/sec"`
PacketsExpiredTotal float64 `perflib:"Packets Expired/sec"`
ActiveQueueLength float64 `perflib:"Active Queue Length"`
ConflictCheckQueueLength float64 `perflib:"Conflict Check Queue Length"`
DiscoversTotal float64 `perflib:"Discovers/sec"`
OffersTotal float64 `perflib:"Offers/sec"`
RequestsTotal float64 `perflib:"Requests/sec"`
InformsTotal float64 `perflib:"Informs/sec"`
AcksTotal float64 `perflib:"Acks/sec"`
NacksTotal float64 `perflib:"Nacks/sec"`
DeclinesTotal float64 `perflib:"Declines/sec"`
ReleasesTotal float64 `perflib:"Releases/sec"`
DeniedDueToMatch float64 `perflib:"Denied due to match."`
DeniedDueToNonMatch float64 `perflib:"Denied due to match."`
OfferQueueLength float64 `perflib:"Offer Queue Length"`
FailoverBndupdSentTotal float64 `perflib:"Failover: BndUpd sent/sec."`
FailoverBndupdReceivedTotal float64 `perflib:"Failover: BndUpd received/sec."`
FailoverBndackSentTotal float64 `perflib:"Failover: BndAck sent/sec."`
FailoverBndackReceivedTotal float64 `perflib:"Failover: BndAck received/sec."`
FailoverBndupdPendingOutboundQueue float64 `perflib:"Failover: BndUpd pending in outbound queue."`
FailoverTransitionsCommunicationinterruptedState float64 `perflib:"Failover: Transitions to COMMUNICATION-INTERRUPTED state."`
FailoverTransitionsPartnerdownState float64 `perflib:"Failover: Transitions to PARTNER-DOWN state."`
FailoverTransitionsRecoverState float64 `perflib:"Failover: Transitions to RECOVER state."`
FailoverBndupdDropped float64 `perflib:"Failover: BndUpd Dropped."`
}
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if utils.PDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
return c.collect(ctx, logger, ch)
}
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dhcpPerfs []dhcpPerf var dhcpPerfs []dhcpPerf
if err := perflib.UnmarshalObject(ctx.PerfObjects["DHCP Server"], &dhcpPerfs, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["DHCP Server"], &dhcpPerfs, logger); err != nil {
return err return err
} }
@@ -369,45 +394,45 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.failoverBndupdSentTotal, c.failoverBndUpdSentTotal,
prometheus.CounterValue, prometheus.CounterValue,
dhcpPerfs[0].FailoverBndupdSentTotal, dhcpPerfs[0].FailoverBndUpdSentTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.failoverBndupdReceivedTotal, c.failoverBndUpdReceivedTotal,
prometheus.CounterValue, prometheus.CounterValue,
dhcpPerfs[0].FailoverBndupdReceivedTotal, dhcpPerfs[0].FailoverBndUpdReceivedTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.failoverBndackSentTotal, c.failoverBndAckSentTotal,
prometheus.CounterValue, prometheus.CounterValue,
dhcpPerfs[0].FailoverBndackSentTotal, dhcpPerfs[0].FailoverBndAckSentTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.failoverBndackReceivedTotal, c.failoverBndAckReceivedTotal,
prometheus.CounterValue, prometheus.CounterValue,
dhcpPerfs[0].FailoverBndackReceivedTotal, dhcpPerfs[0].FailoverBndAckReceivedTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.failoverBndupdPendingOutboundQueue, c.failoverBndUpdPendingOutboundQueue,
prometheus.GaugeValue, prometheus.GaugeValue,
dhcpPerfs[0].FailoverBndupdPendingOutboundQueue, dhcpPerfs[0].FailoverBndUpdPendingOutboundQueue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsCommunicationInterruptedState, c.failoverTransitionsCommunicationInterruptedState,
prometheus.CounterValue, prometheus.CounterValue,
dhcpPerfs[0].FailoverTransitionsCommunicationinterruptedState, dhcpPerfs[0].FailoverTransitionsCommunicationInterruptedState,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsPartnerDownState, c.failoverTransitionsPartnerDownState,
prometheus.CounterValue, prometheus.CounterValue,
dhcpPerfs[0].FailoverTransitionsPartnerdownState, dhcpPerfs[0].FailoverTransitionsPartnerDownState,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
@@ -417,9 +442,173 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.failoverBndupdDropped, c.failoverBndUpdDropped,
prometheus.CounterValue, prometheus.CounterValue,
dhcpPerfs[0].FailoverBndupdDropped, dhcpPerfs[0].FailoverBndUpdDropped,
)
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect DHCP Server metrics: %w", err)
}
data, ok := perfData[perftypes.EmptyInstance]
if !ok {
return errors.New("perflib query for DHCP Server returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedTotal,
prometheus.CounterValue,
data[packetsReceivedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.duplicatesDroppedTotal,
prometheus.CounterValue,
data[duplicatesDroppedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.packetsExpiredTotal,
prometheus.CounterValue,
data[packetsExpiredTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.activeQueueLength,
prometheus.GaugeValue,
data[activeQueueLength].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.conflictCheckQueueLength,
prometheus.GaugeValue,
data[conflictCheckQueueLength].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.discoversTotal,
prometheus.CounterValue,
data[discoversTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.offersTotal,
prometheus.CounterValue,
data[offersTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.requestsTotal,
prometheus.CounterValue,
data[requestsTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.informsTotal,
prometheus.CounterValue,
data[informsTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.acksTotal,
prometheus.CounterValue,
data[acksTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.nACKsTotal,
prometheus.CounterValue,
data[nacksTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.declinesTotal,
prometheus.CounterValue,
data[declinesTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.releasesTotal,
prometheus.CounterValue,
data[releasesTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.offerQueueLength,
prometheus.GaugeValue,
data[offerQueueLength].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.deniedDueToMatch,
prometheus.CounterValue,
data[deniedDueToMatch].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.deniedDueToNonMatch,
prometheus.CounterValue,
data[deniedDueToNonMatch].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdSentTotal,
prometheus.CounterValue,
data[failoverBndUpdSentTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdReceivedTotal,
prometheus.CounterValue,
data[failoverBndUpdReceivedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndAckSentTotal,
prometheus.CounterValue,
data[failoverBndAckSentTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndAckReceivedTotal,
prometheus.CounterValue,
data[failoverBndAckReceivedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdPendingOutboundQueue,
prometheus.GaugeValue,
data[failoverBndUpdPendingOutboundQueue].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsCommunicationInterruptedState,
prometheus.CounterValue,
data[failoverTransitionsCommunicationInterruptedState].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsPartnerDownState,
prometheus.CounterValue,
data[failoverTransitionsPartnerDownState].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsRecoverState,
prometheus.CounterValue,
data[failoverTransitionsRecoverState].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdDropped,
prometheus.CounterValue,
data[failoverBndUpdDropped].FirstValue,
) )
return nil return nil

View File

@@ -3,8 +3,8 @@ package dhcp_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/dhcp" "github.com/prometheus-community/windows_exporter/internal/collector/dhcp"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -4,19 +4,17 @@ package diskdrive
import ( import (
"errors" "errors"
"fmt"
"log/slog" "log/slog"
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const ( const Name = "diskdrive"
Name = "diskdrive"
win32DiskQuery = "SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive"
)
type Config struct{} type Config struct{}
@@ -25,7 +23,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for a few WMI metrics in Win32_DiskDrive. // A Collector is a Prometheus Collector for a few WMI metrics in Win32_DiskDrive.
type Collector struct { type Collector struct {
config Config config Config
wmiClient *wmi.Client miSession *mi.Session
miQuery mi.Query
availability *prometheus.Desc availability *prometheus.Desc
diskInfo *prometheus.Desc diskInfo *prometheus.Desc
@@ -62,12 +61,19 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil { if miSession == nil {
return errors.New("wmiClient or SWbemServicesClient is nil") return errors.New("miSession is nil")
} }
c.wmiClient = wmiClient miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQuery = miQuery
c.miSession = miSession
c.diskInfo = prometheus.NewDesc( c.diskInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"), prometheus.BuildFQName(types.Namespace, Name, "info"),
"General drive information", "General drive information",
@@ -108,14 +114,14 @@ func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
} }
type win32_DiskDrive struct { type win32_DiskDrive struct {
DeviceID string DeviceID string `mi:"DeviceID"`
Model string Model string `mi:"Model"`
Size uint64 Size uint64 `mi:"Size"`
Name string Name string `mi:"Name"`
Caption string Caption string `mi:"Caption"`
Partitions uint32 Partitions uint32 `mi:"Partitions"`
Status string Status string `mi:"Status"`
Availability uint16 Availability uint16 `mi:"Availability"`
} }
var ( var (
@@ -175,9 +181,8 @@ func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []win32_DiskDrive var dst []win32_DiskDrive
if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
if err := c.wmiClient.Query(win32DiskQuery, &dst); err != nil { return fmt.Errorf("WMI query failed: %w", err)
return err
} }
if len(dst) == 0 { if len(dst) == 0 {

View File

@@ -3,8 +3,8 @@ package diskdrive_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/diskdrive" "github.com/prometheus-community/windows_exporter/internal/collector/diskdrive"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,92 @@
package dns
const (
_ = "% User Time"
_ = "176"
_ = "Async Fast Reads/sec"
axfrRequestReceived = "AXFR Request Received"
axfrRequestSent = "AXFR Request Sent"
axfrResponseReceived = "AXFR Response Received"
axfrSuccessReceived = "AXFR Success Received"
axfrSuccessSent = "AXFR Success Sent"
cachingMemory = "Caching Memory"
_ = "Data Flush Pages/sec"
_ = "Data Flushes/sec"
databaseNodeMemory = "Database Node Memory"
dynamicUpdateNoOperation = "Dynamic Update NoOperation"
_ = "Dynamic Update NoOperation/sec"
dynamicUpdateQueued = "Dynamic Update Queued"
_ = "Dynamic Update Received"
_ = "Dynamic Update Received/sec"
dynamicUpdateRejected = "Dynamic Update Rejected"
dynamicUpdateTimeOuts = "Dynamic Update TimeOuts"
dynamicUpdateWrittenToDatabase = "Dynamic Update Written to Database"
_ = "Dynamic Update Written to Database/sec"
_ = "Enumerations Server/sec"
_ = "Fast Read Not Possibles/sec"
_ = "Fast Read Resource Misses/sec"
ixfrRequestReceived = "IXFR Request Received"
ixfrRequestSent = "IXFR Request Sent"
ixfrResponseReceived = "IXFR Response Received"
_ = "IXFR Success Received"
ixfrSuccessSent = "IXFR Success Sent"
ixfrTCPSuccessReceived = "IXFR TCP Success Received"
ixfrUDPSuccessReceived = "IXFR UDP Success Received"
_ = "Lazy Write Flushes/sec"
_ = "Lazy Write Pages/sec"
_ = "Level 2 TLB Fills/sec"
nbStatMemory = "Nbstat Memory"
notifyReceived = "Notify Received"
notifySent = "Notify Sent"
_ = "Query Dropped Bad Socket"
_ = "Query Dropped Bad Socket/sec"
_ = "Query Dropped By Policy"
_ = "Query Dropped By Policy/sec"
_ = "Query Dropped By Response Rate Limiting"
_ = "Query Dropped By Response Rate Limiting/sec"
_ = "Query Dropped Send"
_ = "Query Dropped Send/sec"
_ = "Query Dropped Total"
_ = "Query Dropped Total/sec"
recordFlowMemory = "Record Flow Memory"
recursiveQueries = "Recursive Queries"
_ = "Recursive Queries/sec"
recursiveQueryFailure = "Recursive Query Failure"
_ = "Recursive Query Failure/sec"
_ = "Recursive Send TimeOuts"
recursiveSendTimeOuts = "Recursive TimeOut/sec"
_ = "Responses Suppressed"
_ = "Responses Suppressed/sec"
secureUpdateFailure = "Secure Update Failure"
secureUpdateReceived = "Secure Update Received"
_ = "Secure Update Received/sec"
tcpMessageMemory = "TCP Message Memory"
tcpQueryReceived = "TCP Query Received"
_ = "TCP Query Received/sec"
tcpResponseSent = "TCP Response Sent"
_ = "TCP Response Sent/sec"
_ = "Total Query Received"
_ = "Total Query Received/sec"
_ = "Total Remote Inflight Queries"
_ = "Total Response Sent"
_ = "Total Response Sent/sec"
udpMessageMemory = "UDP Message Memory"
udpQueryReceived = "UDP Query Received"
_ = "UDP Query Received/sec"
udpResponseSent = "UDP Response Sent"
_ = "UDP Response Sent/sec"
unmatchedResponsesReceived = "Unmatched Responses Received"
_ = "Virtual Bytes"
winsLookupReceived = "WINS Lookup Received"
_ = "WINS Lookup Received/sec"
winsResponseSent = "WINS Response Sent"
_ = "WINS Response Sent/sec"
winsReverseLookupReceived = "WINS Reverse Lookup Received"
_ = "WINS Reverse Lookup Received/sec"
winsReverseResponseSent = "WINS Reverse Response Sent"
_ = "WINS Reverse Response Sent/sec"
zoneTransferFailure = "Zone Transfer Failure"
zoneTransferSOARequestSent = "Zone Transfer Request Received"
_ = "Zone Transfer SOA Request Sent"
_ = "Zone Transfer Success"
)

View File

@@ -4,12 +4,15 @@ package dns
import ( import (
"errors" "errors"
"fmt"
"log/slog" "log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "dns" const Name = "dns"
@@ -20,8 +23,9 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.
type Collector struct { type Collector struct {
config Config config Config
wmiClient *wmi.Client
perfDataCollector perfdata.Collector
dynamicUpdatesFailures *prometheus.Desc dynamicUpdatesFailures *prometheus.Desc
dynamicUpdatesQueued *prometheus.Desc dynamicUpdatesQueued *prometheus.Desc
@@ -75,12 +79,56 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil { counters := []string{
return errors.New("wmiClient or SWbemServicesClient is nil") axfrRequestReceived,
axfrRequestSent,
axfrResponseReceived,
axfrSuccessReceived,
axfrSuccessSent,
cachingMemory,
databaseNodeMemory,
dynamicUpdateNoOperation,
dynamicUpdateQueued,
dynamicUpdateRejected,
dynamicUpdateTimeOuts,
dynamicUpdateWrittenToDatabase,
ixfrRequestReceived,
ixfrRequestSent,
ixfrResponseReceived,
ixfrSuccessSent,
ixfrTCPSuccessReceived,
ixfrUDPSuccessReceived,
nbStatMemory,
notifyReceived,
notifySent,
recordFlowMemory,
recursiveQueries,
recursiveQueryFailure,
recursiveSendTimeOuts,
secureUpdateFailure,
secureUpdateReceived,
tcpMessageMemory,
tcpQueryReceived,
tcpResponseSent,
udpMessageMemory,
udpQueryReceived,
udpResponseSent,
unmatchedResponsesReceived,
winsLookupReceived,
winsResponseSent,
winsReverseLookupReceived,
winsReverseResponseSent,
zoneTransferFailure,
zoneTransferSOARequestSent,
} }
c.wmiClient = wmiClient var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "DNS", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create DNS collector: %w", err)
}
c.zoneTransferRequestsReceived = prometheus.NewDesc( c.zoneTransferRequestsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"), prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
@@ -220,138 +268,80 @@ func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name)) perfData, err := c.perfDataCollector.Collect()
if err := c.collect(ch); err != nil { if err != nil {
logger.Error("failed collecting dns metrics", return fmt.Errorf("failed to collect DNS metrics: %w", err)
slog.Any("err", err),
)
return err
} }
return nil data, ok := perfData[perftypes.EmptyInstance]
} if !ok {
return errors.New("perflib query for DNS returned empty result set")
// Win32_PerfRawData_DNS_DNS docs:
// - https://msdn.microsoft.com/en-us/library/ms803992.aspx?f=255&MSPPError=-2147217396
// - https://technet.microsoft.com/en-us/library/cc977686.aspx
type Win32_PerfRawData_DNS_DNS struct {
AXFRRequestReceived uint32
AXFRRequestSent uint32
AXFRResponseReceived uint32
AXFRSuccessReceived uint32
AXFRSuccessSent uint32
CachingMemory uint32
DatabaseNodeMemory uint32
DynamicUpdateNoOperation uint32
DynamicUpdateQueued uint32
DynamicUpdateRejected uint32
DynamicUpdateTimeOuts uint32
DynamicUpdateWrittentoDatabase uint32
IXFRRequestReceived uint32
IXFRRequestSent uint32
IXFRResponseReceived uint32
IXFRSuccessSent uint32
IXFRTCPSuccessReceived uint32
IXFRUDPSuccessReceived uint32
NbstatMemory uint32
NotifyReceived uint32
NotifySent uint32
RecordFlowMemory uint32
RecursiveQueries uint32
RecursiveQueryFailure uint32
RecursiveSendTimeOuts uint32
SecureUpdateFailure uint32
SecureUpdateReceived uint32
TCPMessageMemory uint32
TCPQueryReceived uint32
TCPResponseSent uint32
UDPMessageMemory uint32
UDPQueryReceived uint32
UDPResponseSent uint32
UnmatchedResponsesReceived uint32
WINSLookupReceived uint32
WINSResponseSent uint32
WINSReverseLookupReceived uint32
WINSReverseResponseSent uint32
ZoneTransferFailure uint32
ZoneTransferSOARequestSent uint32
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_DNS_DNS
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_DNS_DNS", &dst); err != nil {
return err
}
if len(dst) == 0 {
return errors.New("WMI query returned empty result set")
} }
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsReceived, c.zoneTransferRequestsReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].AXFRRequestReceived), data[axfrRequestReceived].FirstValue,
"full", "full",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsReceived, c.zoneTransferRequestsReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].IXFRRequestReceived), data[ixfrRequestReceived].FirstValue,
"incremental", "incremental",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsSent, c.zoneTransferRequestsSent,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].AXFRRequestSent), data[axfrRequestSent].FirstValue,
"full", "full",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsSent, c.zoneTransferRequestsSent,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].IXFRRequestSent), data[ixfrRequestSent].FirstValue,
"incremental", "incremental",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsSent, c.zoneTransferRequestsSent,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].ZoneTransferSOARequestSent), data[zoneTransferSOARequestSent].FirstValue,
"soa", "soa",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferResponsesReceived, c.zoneTransferResponsesReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].AXFRResponseReceived), data[axfrResponseReceived].FirstValue,
"full", "full",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferResponsesReceived, c.zoneTransferResponsesReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].IXFRResponseReceived), data[ixfrResponseReceived].FirstValue,
"incremental", "incremental",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessReceived, c.zoneTransferSuccessReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].AXFRSuccessReceived), data[axfrSuccessReceived].FirstValue,
"full", "full",
"tcp", "tcp",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessReceived, c.zoneTransferSuccessReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].IXFRTCPSuccessReceived), data[ixfrTCPSuccessReceived].FirstValue,
"incremental", "incremental",
"tcp", "tcp",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessReceived, c.zoneTransferSuccessReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].IXFRTCPSuccessReceived), data[ixfrTCPSuccessReceived].FirstValue,
"incremental", "incremental",
"udp", "udp",
) )
@@ -359,183 +349,183 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessSent, c.zoneTransferSuccessSent,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].AXFRSuccessSent), data[axfrSuccessSent].FirstValue,
"full", "full",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessSent, c.zoneTransferSuccessSent,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].IXFRSuccessSent), data[ixfrSuccessSent].FirstValue,
"incremental", "incremental",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.zoneTransferFailures, c.zoneTransferFailures,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].ZoneTransferFailure), data[zoneTransferFailure].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes, c.memoryUsedBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(dst[0].CachingMemory), data[cachingMemory].FirstValue,
"caching", "caching",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes, c.memoryUsedBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(dst[0].DatabaseNodeMemory), data[databaseNodeMemory].FirstValue,
"database_node", "database_node",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes, c.memoryUsedBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(dst[0].NbstatMemory), data[nbStatMemory].FirstValue,
"nbstat", "nbstat",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes, c.memoryUsedBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(dst[0].RecordFlowMemory), data[recordFlowMemory].FirstValue,
"record_flow", "record_flow",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes, c.memoryUsedBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(dst[0].TCPMessageMemory), data[tcpMessageMemory].FirstValue,
"tcp_message", "tcp_message",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes, c.memoryUsedBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(dst[0].UDPMessageMemory), data[udpMessageMemory].FirstValue,
"udp_message", "udp_message",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesReceived, c.dynamicUpdatesReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].DynamicUpdateNoOperation), data[dynamicUpdateNoOperation].FirstValue,
"noop", "noop",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesReceived, c.dynamicUpdatesReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].DynamicUpdateWrittentoDatabase), data[dynamicUpdateWrittenToDatabase].FirstValue,
"written", "written",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesQueued, c.dynamicUpdatesQueued,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(dst[0].DynamicUpdateQueued), data[dynamicUpdateQueued].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesFailures, c.dynamicUpdatesFailures,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].DynamicUpdateRejected), data[dynamicUpdateRejected].FirstValue,
"rejected", "rejected",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesFailures, c.dynamicUpdatesFailures,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].DynamicUpdateTimeOuts), data[dynamicUpdateTimeOuts].FirstValue,
"timeout", "timeout",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.notifyReceived, c.notifyReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].NotifyReceived), data[notifyReceived].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.notifySent, c.notifySent,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].NotifySent), data[notifySent].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.recursiveQueries, c.recursiveQueries,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].RecursiveQueries), data[recursiveQueries].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.recursiveQueryFailures, c.recursiveQueryFailures,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].RecursiveQueryFailure), data[recursiveQueryFailure].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.recursiveQuerySendTimeouts, c.recursiveQuerySendTimeouts,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].RecursiveSendTimeOuts), data[recursiveSendTimeOuts].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.queries, c.queries,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].TCPQueryReceived), data[tcpQueryReceived].FirstValue,
"tcp", "tcp",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.queries, c.queries,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].UDPQueryReceived), data[udpQueryReceived].FirstValue,
"udp", "udp",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.responses, c.responses,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].TCPResponseSent), data[tcpResponseSent].FirstValue,
"tcp", "tcp",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.responses, c.responses,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].UDPResponseSent), data[udpResponseSent].FirstValue,
"udp", "udp",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.unmatchedResponsesReceived, c.unmatchedResponsesReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].UnmatchedResponsesReceived), data[unmatchedResponsesReceived].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.winsQueries, c.winsQueries,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].WINSLookupReceived), data[winsLookupReceived].FirstValue,
"forward", "forward",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.winsQueries, c.winsQueries,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].WINSReverseLookupReceived), data[winsReverseLookupReceived].FirstValue,
"reverse", "reverse",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.winsResponses, c.winsResponses,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].WINSResponseSent), data[winsResponseSent].FirstValue,
"forward", "forward",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.winsResponses, c.winsResponses,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].WINSReverseResponseSent), data[winsReverseResponseSent].FirstValue,
"reverse", "reverse",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.secureUpdateFailures, c.secureUpdateFailures,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].SecureUpdateFailure), data[secureUpdateFailure].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.secureUpdateReceived, c.secureUpdateReceived,
prometheus.CounterValue, prometheus.CounterValue,
float64(dst[0].SecureUpdateReceived), data[secureUpdateReceived].FirstValue,
) )
return nil return nil

View File

@@ -3,8 +3,8 @@ package dns_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/dns" "github.com/prometheus-community/windows_exporter/internal/collector/dns"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,352 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"os"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
)
// Name is the registered name of this collector.
const Name = "exchange"

// Names of the Exchange sub-collectors that can be toggled via
// Config.CollectorsEnabled / --collector.exchange.enabled.
const (
	adAccessProcesses   = "ADAccessProcesses"
	transportQueues     = "TransportQueues"
	httpProxy           = "HttpProxy"
	activeSync          = "ActiveSync"
	availabilityService = "AvailabilityService"
	outlookWebAccess    = "OutlookWebAccess"
	autoDiscover        = "Autodiscover"
	workloadManagement  = "WorkloadManagement"
	rpcClientAccess     = "RpcClientAccess"
	mapiHttpEmsmdb      = "MapiHttpEmsmdb"
)

// Config holds the exchange collector's configuration.
type Config struct {
	// CollectorsEnabled lists which Exchange sub-collectors are run.
	CollectorsEnabled []string `yaml:"collectors_enabled"`
}

// ConfigDefaults enables every Exchange sub-collector.
var ConfigDefaults = Config{
	CollectorsEnabled: []string{
		adAccessProcesses,
		transportQueues,
		httpProxy,
		activeSync,
		availabilityService,
		outlookWebAccess,
		autoDiscover,
		workloadManagement,
		rpcClientAccess,
		mapiHttpEmsmdb,
	},
}
// Collector is a Prometheus collector for Microsoft Exchange performance
// counters. It holds one perfdata.Collector per enabled sub-collector (used
// in PDH mode) and the metric descriptors shared by both the perflib and PDH
// collection paths.
type Collector struct {
	config Config

	// Per-subsystem PDH collectors; only the ones named in
	// config.CollectorsEnabled are built.
	perfDataCollectorADAccessProcesses           perfdata.Collector
	perfDataCollectorTransportQueues             perfdata.Collector
	perfDataCollectorHttpProxy                   perfdata.Collector
	perfDataCollectorActiveSync                  perfdata.Collector
	perfDataCollectorAvailabilityService         perfdata.Collector
	perfDataCollectorOWA                         perfdata.Collector
	perfDataCollectorAutoDiscover                perfdata.Collector
	perfDataCollectorWorkloadManagementWorkloads perfdata.Collector
	perfDataCollectorRpcClientAccess             perfdata.Collector
	perfDataCollectorMapiHttpEmsmdb              perfdata.Collector

	// Metric descriptors, initialized in Build.
	activeMailboxDeliveryQueueLength        *prometheus.Desc
	activeSyncRequestsPerSec                *prometheus.Desc
	activeTasks                             *prometheus.Desc
	activeUserCount                         *prometheus.Desc
	activeUserCountMapiHttpEmsMDB           *prometheus.Desc
	autoDiscoverRequestsPerSec              *prometheus.Desc
	availabilityRequestsSec                 *prometheus.Desc
	averageAuthenticationLatency            *prometheus.Desc
	averageCASProcessingLatency             *prometheus.Desc
	completedTasks                          *prometheus.Desc
	connectionCount                         *prometheus.Desc
	currentUniqueUsers                      *prometheus.Desc
	externalActiveRemoteDeliveryQueueLength *prometheus.Desc
	externalLargestDeliveryQueueLength      *prometheus.Desc
	internalActiveRemoteDeliveryQueueLength *prometheus.Desc
	internalLargestDeliveryQueueLength      *prometheus.Desc
	isActive                                *prometheus.Desc
	ldapReadTime                            *prometheus.Desc
	ldapSearchTime                          *prometheus.Desc
	ldapTimeoutErrorsPerSec                 *prometheus.Desc
	ldapWriteTime                           *prometheus.Desc
	longRunningLDAPOperationsPerMin         *prometheus.Desc
	mailboxServerLocatorAverageLatency      *prometheus.Desc
	mailboxServerProxyFailureRate           *prometheus.Desc
	outstandingProxyRequests                *prometheus.Desc
	owaRequestsPerSec                       *prometheus.Desc
	pingCommandsPending                     *prometheus.Desc
	poisonQueueLength                       *prometheus.Desc
	proxyRequestsPerSec                     *prometheus.Desc
	queuedTasks                             *prometheus.Desc
	retryMailboxDeliveryQueueLength         *prometheus.Desc
	rpcAveragedLatency                      *prometheus.Desc
	rpcOperationsPerSec                     *prometheus.Desc
	rpcRequests                             *prometheus.Desc
	syncCommandsPerSec                      *prometheus.Desc
	unreachableQueueLength                  *prometheus.Desc
	userCount                               *prometheus.Desc
	yieldedTasks                            *prometheus.Desc
}
// New builds a Collector from the supplied configuration. A nil config, or a
// nil CollectorsEnabled list, falls back to ConfigDefaults.
func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	if config.CollectorsEnabled == nil {
		config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
	}

	return &Collector{config: *config}
}
// NewWithFlags registers the exchange collector's command-line flags on app
// and returns a Collector whose enabled-collector list is populated from the
// parsed flags.
//
// Flags:
//   - collector.exchange.list: print each known sub-collector with its
//     perflib object name/ID, then exit the process (os.Exit(0) in PreAction).
//   - collector.exchange.enabled: comma-separated sub-collector names;
//     defaults to all of ConfigDefaults.CollectorsEnabled.
func NewWithFlags(app *kingpin.Application) *Collector {
	c := &Collector{
		config: ConfigDefaults,
	}
	// Start from an empty list; the Action below repopulates it after flag
	// parsing (the flag default re-supplies the full list when unset).
	c.config.CollectorsEnabled = make([]string, 0)

	var listAllCollectors bool

	var collectorsEnabled string

	app.Flag(
		"collector.exchange.list",
		"List the collectors along with their perflib object name/ids",
	).BoolVar(&listAllCollectors)

	app.Flag(
		"collector.exchange.enabled",
		"Comma-separated list of collectors to use. Defaults to all, if not specified.",
	).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)

	app.PreAction(func(*kingpin.ParseContext) error {
		if listAllCollectors {
			// Mapping of sub-collector name to its perflib object for the
			// --collector.exchange.list output.
			collectorDesc := map[string]string{
				adAccessProcesses:   "[19108] MSExchange ADAccess Processes",
				transportQueues:     "[20524] MSExchangeTransport Queues",
				httpProxy:           "[36934] MSExchange HttpProxy",
				activeSync:          "[25138] MSExchange ActiveSync",
				availabilityService: "[24914] MSExchange Availability Service",
				outlookWebAccess:    "[24618] MSExchange OWA",
				autoDiscover:        "[29240] MSExchange Autodiscover",
				workloadManagement:  "[19430] MSExchange WorkloadManagement Workloads",
				rpcClientAccess:     "[29336] MSExchange RpcClientAccess",
				mapiHttpEmsmdb:      "[26463] MSExchange MapiHttp Emsmdb",
			}

			sb := strings.Builder{}
			sb.WriteString(fmt.Sprintf("%-32s %-32s\n", "Collector Name", "[PerfID] Perflib Object"))

			for _, cname := range ConfigDefaults.CollectorsEnabled {
				sb.WriteString(fmt.Sprintf("%-32s %-32s\n", cname, collectorDesc[cname]))
			}

			app.UsageTemplate(sb.String()).Usage(nil)

			// Listing is a terminal operation; never continue into scraping.
			os.Exit(0)
		}

		return nil
	})

	app.Action(func(*kingpin.ParseContext) error {
		c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")

		return nil
	})

	return c
}
// GetName returns the registered name of this collector ("exchange").
func (c *Collector) GetName() string {
	return Name
}
// GetPerfCounter returns the perflib object names this collector consumes.
// In PDH mode the per-subsystem perfdata collectors query their own counters,
// so no perflib objects are requested.
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	if utils.PDHEnabled() {
		return []string{}, nil
	}

	perfObjects := []string{
		"MSExchange ADAccess Processes",
		"MSExchangeTransport Queues",
		"MSExchange HttpProxy",
		"MSExchange ActiveSync",
		"MSExchange Availability Service",
		"MSExchange OWA",
		"MSExchangeAutodiscover",
		"MSExchange WorkloadManagement Workloads",
		"MSExchange RpcClientAccess",
		"MSExchange MapiHttp Emsmdb",
	}

	return perfObjects, nil
}
// Close releases resources held by the collector. The exchange collector
// holds nothing that needs explicit cleanup, so this is a no-op.
func (c *Collector) Close(_ *slog.Logger) error {
	return nil
}
// Build initializes every metric descriptor and, when PDH mode is enabled,
// the perfdata collectors for each enabled Exchange sub-collector.
//
// Fix: indexing collectorFuncs with an unrecognized collector name yielded a
// nil function and panicked on call; unknown names now fail with an explicit
// error instead.
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	if utils.PDHEnabled() {
		collectorFuncs := map[string]func() error{
			adAccessProcesses:   c.buildADAccessProcesses,
			transportQueues:     c.buildTransportQueues,
			httpProxy:           c.buildHTTPProxy,
			activeSync:          c.buildActiveSync,
			availabilityService: c.buildAvailabilityService,
			outlookWebAccess:    c.buildOWA,
			autoDiscover:        c.buildAutoDiscover,
			workloadManagement:  c.buildWorkloadManagementWorkloads,
			rpcClientAccess:     c.buildRPC,
			mapiHttpEmsmdb:      c.buildMapiHttpEmsmdb,
		}

		for _, collectorName := range c.config.CollectorsEnabled {
			buildFunc, ok := collectorFuncs[collectorName]
			if !ok {
				return fmt.Errorf("unknown exchange collector: %s", collectorName)
			}

			if err := buildFunc(); err != nil {
				return err
			}
		}
	}

	// desc creates a new prometheus description
	desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
		return prometheus.NewDesc(
			prometheus.BuildFQName(types.Namespace, Name, metricName),
			description,
			labels,
			nil,
		)
	}

	c.rpcAveragedLatency = desc("rpc_avg_latency_sec", "The latency (sec) averaged for the past 1024 packets")
	c.rpcRequests = desc("rpc_requests", "Number of client requests currently being processed by the RPC Client Access service")
	c.activeUserCount = desc("rpc_active_user_count", "Number of unique users that have shown some kind of activity in the last 2 minutes")
	c.connectionCount = desc("rpc_connection_count", "Total number of client connections maintained")
	c.rpcOperationsPerSec = desc("rpc_operations_total", "The rate at which RPC operations occur")
	c.userCount = desc("rpc_user_count", "Number of users")
	c.ldapReadTime = desc("ldap_read_time_sec", "Time (sec) to send an LDAP read request and receive a response", "name")
	c.ldapSearchTime = desc("ldap_search_time_sec", "Time (sec) to send an LDAP search request and receive a response", "name")
	c.ldapWriteTime = desc("ldap_write_time_sec", "Time (sec) to send an LDAP Add/Modify/Delete request and receive a response", "name")
	c.ldapTimeoutErrorsPerSec = desc("ldap_timeout_errors_total", "Total number of LDAP timeout errors", "name")
	c.longRunningLDAPOperationsPerMin = desc("ldap_long_running_ops_per_sec", "Long Running LDAP operations per second", "name")
	c.externalActiveRemoteDeliveryQueueLength = desc("transport_queues_external_active_remote_delivery", "External Active Remote Delivery Queue length", "name")
	c.internalActiveRemoteDeliveryQueueLength = desc("transport_queues_internal_active_remote_delivery", "Internal Active Remote Delivery Queue length", "name")
	c.activeMailboxDeliveryQueueLength = desc("transport_queues_active_mailbox_delivery", "Active Mailbox Delivery Queue length", "name")
	c.retryMailboxDeliveryQueueLength = desc("transport_queues_retry_mailbox_delivery", "Retry Mailbox Delivery Queue length", "name")
	c.unreachableQueueLength = desc("transport_queues_unreachable", "Unreachable Queue length", "name")
	c.externalLargestDeliveryQueueLength = desc("transport_queues_external_largest_delivery", "External Largest Delivery Queue length", "name")
	c.internalLargestDeliveryQueueLength = desc("transport_queues_internal_largest_delivery", "Internal Largest Delivery Queue length", "name")
	c.poisonQueueLength = desc("transport_queues_poison", "Poison Queue length", "name")
	c.mailboxServerLocatorAverageLatency = desc("http_proxy_mailbox_server_locator_avg_latency_sec", "Average latency (sec) of MailboxServerLocator web service calls", "name")
	c.averageAuthenticationLatency = desc("http_proxy_avg_auth_latency", "Average time spent authenticating CAS requests over the last 200 samples", "name")
	c.outstandingProxyRequests = desc("http_proxy_outstanding_proxy_requests", "Number of concurrent outstanding proxy requests", "name")
	c.proxyRequestsPerSec = desc("http_proxy_requests_total", "Number of proxy requests processed each second", "name")
	c.availabilityRequestsSec = desc("avail_service_requests_per_sec", "Number of requests serviced per second")
	c.currentUniqueUsers = desc("owa_current_unique_users", "Number of unique users currently logged on to Outlook Web App")
	c.owaRequestsPerSec = desc("owa_requests_total", "Number of requests handled by Outlook Web App per second")
	c.autoDiscoverRequestsPerSec = desc("autodiscover_requests_total", "Number of autodiscover service requests processed each second")
	c.activeTasks = desc("workload_active_tasks", "Number of active tasks currently running in the background for workload management", "name")
	c.completedTasks = desc("workload_completed_tasks", "Number of workload management tasks that have been completed", "name")
	c.queuedTasks = desc("workload_queued_tasks", "Number of workload management tasks that are currently queued up waiting to be processed", "name")
	c.yieldedTasks = desc("workload_yielded_tasks", "The total number of tasks that have been yielded by a workload", "name")
	c.isActive = desc("workload_is_active", "Active indicates whether the workload is in an active (1) or paused (0) state", "name")
	c.activeSyncRequestsPerSec = desc("activesync_requests_total", "Num HTTP requests received from the client via ASP.NET per sec. Shows Current user load")
	c.averageCASProcessingLatency = desc("http_proxy_avg_cas_processing_latency_sec", "Average latency (sec) of CAS processing time over the last 200 reqs", "name")
	c.mailboxServerProxyFailureRate = desc("http_proxy_mailbox_proxy_failure_rate", "% of failures between this CAS and MBX servers over the last 200 samples", "name")
	c.pingCommandsPending = desc("activesync_ping_cmds_pending", "Number of ping commands currently pending in the queue")
	c.syncCommandsPerSec = desc("activesync_sync_cmds_total", "Number of sync commands processed per second. Clients use this command to synchronize items within a folder")
	c.activeUserCountMapiHttpEmsMDB = desc("mapihttp_emsmdb_active_user_count", "Number of unique outlook users that have shown some kind of activity in the last 2 minutes")

	return nil
}
// Collect collects exchange metrics and sends them to prometheus.
//
// Fix: indexing collectorFuncs with an unrecognized collector name yielded a
// nil function and panicked on call; unknown names now return an explicit
// error instead.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	// PDH mode bypasses the perflib path entirely.
	if utils.PDHEnabled() {
		return c.collectPDH(ch)
	}

	logger = logger.With(slog.String("collector", Name))
	collectorFuncs := map[string]func(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error{
		adAccessProcesses:   c.collectADAccessProcesses,
		transportQueues:     c.collectTransportQueues,
		httpProxy:           c.collectHTTPProxy,
		activeSync:          c.collectActiveSync,
		availabilityService: c.collectAvailabilityService,
		outlookWebAccess:    c.collectOWA,
		autoDiscover:        c.collectAutoDiscover,
		workloadManagement:  c.collectWorkloadManagementWorkloads,
		rpcClientAccess:     c.collectRPC,
		mapiHttpEmsmdb:      c.collectMapiHttpEmsmdb,
	}

	for _, collectorName := range c.config.CollectorsEnabled {
		collectFunc, ok := collectorFuncs[collectorName]
		if !ok {
			return fmt.Errorf("unknown exchange collector: %s", collectorName)
		}

		if err := collectFunc(ctx, logger, ch); err != nil {
			logger.Error("Error in "+collectorName,
				slog.Any("err", err),
			)

			return err
		}
	}

	return nil
}
// collectPDH collects exchange metrics via PDH and sends them to prometheus.
// Per-sub-collector errors are accumulated and joined so one failing
// subsystem does not suppress the others.
//
// Fixes: the doc comment previously named the wrong function ("Collect"),
// and indexing collectorFuncs with an unrecognized collector name yielded a
// nil function that panicked on call; unknown names now produce an error.
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
	collectorFuncs := map[string]func(ch chan<- prometheus.Metric) error{
		adAccessProcesses:   c.collectPDHADAccessProcesses,
		transportQueues:     c.collectPDHTransportQueues,
		httpProxy:           c.collectPDHHTTPProxy,
		activeSync:          c.collectPDHActiveSync,
		availabilityService: c.collectPDHAvailabilityService,
		outlookWebAccess:    c.collectPDHOWA,
		autoDiscover:        c.collectPDHAutoDiscover,
		workloadManagement:  c.collectPDHWorkloadManagementWorkloads,
		rpcClientAccess:     c.collectPDHRPC,
		mapiHttpEmsmdb:      c.collectPDHMapiHttpEmsmdb,
	}

	errs := make([]error, 0, len(c.config.CollectorsEnabled))

	for _, collectorName := range c.config.CollectorsEnabled {
		collectFunc, ok := collectorFuncs[collectorName]
		if !ok {
			errs = append(errs, fmt.Errorf("unknown exchange collector: %s", collectorName))

			continue
		}

		errs = append(errs, collectFunc(ch))
	}

	return errors.Join(errs...)
}
// toLabelName normalizes a perflib instance name into a metric label value:
// lowercase, runs of whitespace collapsed to single underscores, dots turned
// into underscores, and one pass of double-underscore squeezing.
func (c *Collector) toLabelName(name string) string {
	lowered := strings.ToLower(name)
	joined := strings.Join(strings.Fields(lowered), "_")
	joined = strings.ReplaceAll(joined, ".", "_")

	return strings.ReplaceAll(joined, "__", "_")
}
// msToSec converts a duration expressed in milliseconds to seconds.
func (c *Collector) msToSec(t float64) float64 {
	const millisPerSecond = 1000

	return t / millisPerSecond
}

View File

@@ -0,0 +1,101 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// PDH counter names for the MSExchange ActiveSync object.
const (
	requestsPerSec      = "Requests/sec"
	pingCommandsPending = "Ping Commands Pending"
	syncCommandsPerSec  = "Sync Commands/sec"
)

// Perflib: [25138] MSExchange ActiveSync.
type perflibActiveSync struct {
	RequestsPerSec      float64 `perflib:"Requests/sec"`
	PingCommandsPending float64 `perflib:"Ping Commands Pending"`
	SyncCommandsPerSec  float64 `perflib:"Sync Commands/sec"`
}
// buildActiveSync creates the PDH collector for the
// "MSExchange ActiveSync" object.
func (c *Collector) buildActiveSync() error {
	var err error

	c.perfDataCollectorActiveSync, err = perfdata.NewCollector(perfdata.V1, "MSExchange ActiveSync", perfdata.AllInstances, []string{
		requestsPerSec,
		pingCommandsPending,
		syncCommandsPerSec,
	})
	if err != nil {
		return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err)
	}

	return nil
}
// collectActiveSync emits the ActiveSync metrics from the perflib object
// "MSExchange ActiveSync".
func (c *Collector) collectActiveSync(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var rows []perflibActiveSync
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange ActiveSync"], &rows, logger); err != nil {
		return err
	}

	for _, row := range rows {
		ch <- prometheus.MustNewConstMetric(c.activeSyncRequestsPerSec, prometheus.CounterValue, row.RequestsPerSec)
		ch <- prometheus.MustNewConstMetric(c.pingCommandsPending, prometheus.GaugeValue, row.PingCommandsPending)
		ch <- prometheus.MustNewConstMetric(c.syncCommandsPerSec, prometheus.CounterValue, row.SyncCommandsPerSec)
	}

	return nil
}
// collectPDHActiveSync emits the ActiveSync metrics gathered by the PDH
// collector built in buildActiveSync.
func (c *Collector) collectPDHActiveSync(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorActiveSync.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchange ActiveSync metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchange ActiveSync returned empty result set")
	}

	for _, counters := range perfData {
		ch <- prometheus.MustNewConstMetric(c.activeSyncRequestsPerSec, prometheus.CounterValue, counters[requestsPerSec].FirstValue)
		ch <- prometheus.MustNewConstMetric(c.pingCommandsPending, prometheus.GaugeValue, counters[pingCommandsPending].FirstValue)
		ch <- prometheus.MustNewConstMetric(c.syncCommandsPerSec, prometheus.CounterValue, counters[syncCommandsPerSec].FirstValue)
	}

	return nil
}

View File

@@ -0,0 +1,164 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"strings"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// PDH counter names for the MSExchange ADAccess Processes object.
const (
	ldapReadTime                    = "LDAP Read Time"
	ldapSearchTime                  = "LDAP Search Time"
	ldapWriteTime                   = "LDAP Write Time"
	ldapTimeoutErrorsPerSec         = "LDAP Timeout Errors/sec"
	longRunningLDAPOperationsPerMin = "Long Running LDAP Operations/min"
)

// Perflib: [19108] MSExchange ADAccess Processes.
type perflibADAccessProcesses struct {
	// Name is the instance name (process name plus a PID suffix).
	Name string

	LDAPReadTime                    float64 `perflib:"LDAP Read Time"`
	LDAPSearchTime                  float64 `perflib:"LDAP Search Time"`
	LDAPWriteTime                   float64 `perflib:"LDAP Write Time"`
	LDAPTimeoutErrorsPerSec         float64 `perflib:"LDAP Timeout Errors/sec"`
	LongRunningLDAPOperationsPerMin float64 `perflib:"Long Running LDAP Operations/min"`
}
// buildADAccessProcesses creates the PDH collector for the
// "MSExchange ADAccess Processes" object.
func (c *Collector) buildADAccessProcesses() error {
	var err error

	c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector(perfdata.V1, "MSExchange ADAccess Processes", perfdata.AllInstances, []string{
		ldapReadTime,
		ldapSearchTime,
		ldapWriteTime,
		ldapTimeoutErrorsPerSec,
		longRunningLDAPOperationsPerMin,
	})
	if err != nil {
		return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err)
	}

	return nil
}
// collectADAccessProcesses emits per-process ADAccess LDAP metrics from the
// perflib object "MSExchange ADAccess Processes". Instances whose normalized
// label ends in "_total" are skipped; colliding labels (same process name,
// different PID) are disambiguated with a numeric suffix.
func (c *Collector) collectADAccessProcesses(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var processes []perflibADAccessProcesses
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange ADAccess Processes"], &processes, logger); err != nil {
		return err
	}

	seen := make(map[string]int)

	for _, process := range processes {
		label := c.toLabelName(process.Name)
		if strings.HasSuffix(label, "_total") {
			continue
		}

		// The PID suffix is dropped from the label, so a handful of
		// instances collide; append a counter to repeats.
		seen[label]++
		if seen[label] > 1 {
			label = fmt.Sprintf("%s_%d", label, seen[label])
		}

		ch <- prometheus.MustNewConstMetric(c.ldapReadTime, prometheus.CounterValue, c.msToSec(process.LDAPReadTime), label)
		ch <- prometheus.MustNewConstMetric(c.ldapSearchTime, prometheus.CounterValue, c.msToSec(process.LDAPSearchTime), label)
		ch <- prometheus.MustNewConstMetric(c.ldapWriteTime, prometheus.CounterValue, c.msToSec(process.LDAPWriteTime), label)
		ch <- prometheus.MustNewConstMetric(c.ldapTimeoutErrorsPerSec, prometheus.CounterValue, process.LDAPTimeoutErrorsPerSec, label)
		ch <- prometheus.MustNewConstMetric(c.longRunningLDAPOperationsPerMin, prometheus.CounterValue, process.LongRunningLDAPOperationsPerMin*60, label)
	}

	return nil
}
// collectPDHADAccessProcesses emits per-process ADAccess LDAP metrics
// gathered by the PDH collector built in buildADAccessProcesses.
//
// NOTE(review): unlike the perflib path (collectADAccessProcesses), this
// variant does not skip instances whose label ends in "_total" — confirm
// whether perfdata.AllInstances already excludes totals. Also, perfData is a
// map, so iteration order is nondeterministic; the "_2"/"_3" suffix assigned
// to colliding labels may vary between scrapes.
func (c *Collector) collectPDHADAccessProcesses(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorADAccessProcesses.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchange ADAccess Processes metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchange ADAccess Processes returned empty result set")
	}

	labelUseCount := make(map[string]int)

	for name, data := range perfData {
		labelName := c.toLabelName(name)

		// Since we're not including the PID suffix from the instance names in the label names, we get an occasional duplicate.
		// This seems to affect about 4 instances only of this object.
		labelUseCount[labelName]++
		if labelUseCount[labelName] > 1 {
			labelName = fmt.Sprintf("%s_%d", labelName, labelUseCount[labelName])
		}

		// LDAP read/search/write times arrive in milliseconds; export seconds.
		ch <- prometheus.MustNewConstMetric(
			c.ldapReadTime,
			prometheus.CounterValue,
			c.msToSec(data[ldapReadTime].FirstValue),
			labelName,
		)
		ch <- prometheus.MustNewConstMetric(
			c.ldapSearchTime,
			prometheus.CounterValue,
			c.msToSec(data[ldapSearchTime].FirstValue),
			labelName,
		)
		ch <- prometheus.MustNewConstMetric(
			c.ldapWriteTime,
			prometheus.CounterValue,
			c.msToSec(data[ldapWriteTime].FirstValue),
			labelName,
		)
		ch <- prometheus.MustNewConstMetric(
			c.ldapTimeoutErrorsPerSec,
			prometheus.CounterValue,
			data[ldapTimeoutErrorsPerSec].FirstValue,
			labelName,
		)
		// Counter is per minute; scale to a per-second-compatible total.
		ch <- prometheus.MustNewConstMetric(
			c.longRunningLDAPOperationsPerMin,
			prometheus.CounterValue,
			data[longRunningLDAPOperationsPerMin].FirstValue*60,
			labelName,
		)
	}

	return nil
}

View File

@@ -0,0 +1,71 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// [29240] MSExchangeAutodiscover.
type perflibAutodiscover struct {
	RequestsPerSec float64 `perflib:"Requests/sec"`
}
// buildAutoDiscover creates the PDH collector for the
// "MSExchange Autodiscover" object.
func (c *Collector) buildAutoDiscover() error {
	var err error

	c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector(perfdata.V1, "MSExchange Autodiscover", perfdata.AllInstances, []string{
		requestsPerSec,
	})
	if err != nil {
		return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
	}

	return nil
}
// collectAutoDiscover emits the autodiscover request-rate metric from the
// perflib object "MSExchangeAutodiscover".
func (c *Collector) collectAutoDiscover(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var rows []perflibAutodiscover
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchangeAutodiscover"], &rows, logger); err != nil {
		return err
	}

	for _, row := range rows {
		ch <- prometheus.MustNewConstMetric(c.autoDiscoverRequestsPerSec, prometheus.CounterValue, row.RequestsPerSec)
	}

	return nil
}
// collectPDHAutoDiscover emits the autodiscover request-rate metric gathered
// by the PDH collector built in buildAutoDiscover.
func (c *Collector) collectPDHAutoDiscover(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorAutoDiscover.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchange Autodiscover metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchange Autodiscover returned empty result set")
	}

	for _, counters := range perfData {
		ch <- prometheus.MustNewConstMetric(c.autoDiscoverRequestsPerSec, prometheus.CounterValue, counters[requestsPerSec].FirstValue)
	}

	return nil
}

View File

@@ -0,0 +1,69 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Perflib: [24914] MSExchange Availability Service.
type perflibAvailabilityService struct {
	RequestsSec float64 `perflib:"Availability Requests (sec)"`
}
// buildAvailabilityService creates the PDH collector for the
// "MSExchange Availability Service" object.
//
// Fix: the counter list was empty, so the collector gathered no data and the
// lookup of data[requestsPerSec] in collectPDHAvailabilityService could never
// find a value. Request the counter the collect side actually reads.
// NOTE(review): confirm the PDH counter name for this object — the perflib
// struct tag uses "Availability Requests (sec)", not "Requests/sec".
func (c *Collector) buildAvailabilityService() error {
	counters := []string{
		requestsPerSec,
	}

	var err error

	c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector(perfdata.V1, "MSExchange Availability Service", perfdata.AllInstances, counters)
	if err != nil {
		return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err)
	}

	return nil
}
// collectAvailabilityService emits the availability-service request-rate
// metric from the perflib object "MSExchange Availability Service".
func (c *Collector) collectAvailabilityService(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var rows []perflibAvailabilityService
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange Availability Service"], &rows, logger); err != nil {
		return err
	}

	for _, row := range rows {
		ch <- prometheus.MustNewConstMetric(c.availabilityRequestsSec, prometheus.CounterValue, row.RequestsSec)
	}

	return nil
}
// collectPDHAvailabilityService emits the availability requests/sec counter
// from the PDH-based collector.
func (c *Collector) collectPDHAvailabilityService(ch chan<- prometheus.Metric) error {
	// Counter name of the "MSExchange Availability Service" object (see the
	// perflib tag on perflibAvailabilityService.RequestsSec). The previous
	// code indexed data[requestsPerSec] ("Requests/sec"), which belongs to
	// other Exchange objects, so the value was read from a missing key.
	const availabilityRequestsPerSec = "Availability Requests (sec)"

	perfData, err := c.perfDataCollectorAvailabilityService.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchange Availability Service metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchange Availability Service returned empty result set")
	}

	for _, data := range perfData {
		ch <- prometheus.MustNewConstMetric(
			c.availabilityRequestsSec,
			prometheus.CounterValue,
			data[availabilityRequestsPerSec].FirstValue,
		)
	}

	return nil
}

View File

@@ -0,0 +1,156 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Counter names of the "MSExchange HttpProxy" performance object, used by the
// PDH-based collector.
const (
mailboxServerLocatorAverageLatency = "MailboxServerLocator Average Latency (Moving Average)"
averageAuthenticationLatency = "Average Authentication Latency"
averageCASProcessingLatency = "Average ClientAccess Server Processing Latency"
mailboxServerProxyFailureRate = "Mailbox Server Proxy Failure Rate"
outstandingProxyRequests = "Outstanding Proxy Requests"
proxyRequestsPerSec = "Proxy Requests/Sec"
)
// Perflib: [36934] MSExchange HttpProxy.
//
// Fields are decoded from the perflib counter named in each struct tag.
type perflibHTTPProxy struct {
Name string // instance name; converted with toLabelName and emitted as a metric label
MailboxServerLocatorAverageLatency float64 `perflib:"MailboxServerLocator Average Latency (Moving Average)"`
AverageAuthenticationLatency float64 `perflib:"Average Authentication Latency"`
AverageCASProcessingLatency float64 `perflib:"Average ClientAccess Server Processing Latency"`
MailboxServerProxyFailureRate float64 `perflib:"Mailbox Server Proxy Failure Rate"`
OutstandingProxyRequests float64 `perflib:"Outstanding Proxy Requests"`
ProxyRequestsPerSec float64 `perflib:"Proxy Requests/Sec"`
}
// buildHTTPProxy creates the PDH collector for the "MSExchange HttpProxy"
// performance object.
func (c *Collector) buildHTTPProxy() error {
	counterNames := []string{
		mailboxServerLocatorAverageLatency,
		averageAuthenticationLatency,
		averageCASProcessingLatency,
		mailboxServerProxyFailureRate,
		outstandingProxyRequests,
		proxyRequestsPerSec,
	}

	collector, err := perfdata.NewCollector(perfdata.V1, "MSExchange HttpProxy", perfdata.AllInstances, counterNames)
	if err != nil {
		return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err)
	}

	c.perfDataCollectorHttpProxy = collector

	return nil
}
// collectHTTPProxy emits per-instance HttpProxy metrics from the perflib
// snapshot in the scrape context. Latency counters reported in milliseconds
// are converted to seconds via msToSec.
func (c *Collector) collectHTTPProxy(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var instances []perflibHTTPProxy
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange HttpProxy"], &instances, logger); err != nil {
		return err
	}

	for _, inst := range instances {
		label := c.toLabelName(inst.Name)

		for _, m := range []struct {
			desc  *prometheus.Desc
			vt    prometheus.ValueType
			value float64
		}{
			{c.mailboxServerLocatorAverageLatency, prometheus.GaugeValue, c.msToSec(inst.MailboxServerLocatorAverageLatency)},
			{c.averageAuthenticationLatency, prometheus.GaugeValue, inst.AverageAuthenticationLatency},
			{c.averageCASProcessingLatency, prometheus.GaugeValue, c.msToSec(inst.AverageCASProcessingLatency)},
			{c.mailboxServerProxyFailureRate, prometheus.GaugeValue, inst.MailboxServerProxyFailureRate},
			{c.outstandingProxyRequests, prometheus.GaugeValue, inst.OutstandingProxyRequests},
			{c.proxyRequestsPerSec, prometheus.CounterValue, inst.ProxyRequestsPerSec},
		} {
			ch <- prometheus.MustNewConstMetric(m.desc, m.vt, m.value, label)
		}
	}

	return nil
}
// collectPDHHTTPProxy emits per-instance HttpProxy metrics from the PDH-based
// collector. Latency counters reported in milliseconds are converted to
// seconds via msToSec.
func (c *Collector) collectPDHHTTPProxy(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorHttpProxy.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchange HttpProxy Service metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchange HttpProxy Service returned empty result set")
	}

	for instanceName, counters := range perfData {
		label := c.toLabelName(instanceName)

		for _, m := range []struct {
			desc  *prometheus.Desc
			vt    prometheus.ValueType
			value float64
		}{
			{c.mailboxServerLocatorAverageLatency, prometheus.GaugeValue, c.msToSec(counters[mailboxServerLocatorAverageLatency].FirstValue)},
			{c.averageAuthenticationLatency, prometheus.GaugeValue, counters[averageAuthenticationLatency].FirstValue},
			{c.averageCASProcessingLatency, prometheus.GaugeValue, c.msToSec(counters[averageCASProcessingLatency].FirstValue)},
			{c.mailboxServerProxyFailureRate, prometheus.GaugeValue, counters[mailboxServerProxyFailureRate].FirstValue},
			{c.outstandingProxyRequests, prometheus.GaugeValue, counters[outstandingProxyRequests].FirstValue},
			{c.proxyRequestsPerSec, prometheus.CounterValue, counters[proxyRequestsPerSec].FirstValue},
		} {
			ch <- prometheus.MustNewConstMetric(m.desc, m.vt, m.value, label)
		}
	}

	return nil
}

View File

@@ -0,0 +1,75 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Counter names of the "MSExchange MapiHttp Emsmdb" performance object.
const (
activeUserCount = "Active User Count"
)
// perflib [26463] MSExchange MapiHttp Emsmdb.
//
// Fields are decoded from the perflib counter named in the struct tag.
type perflibMapiHttpEmsmdb struct {
ActiveUserCount float64 `perflib:"Active User Count"` // currently active users
}
// buildMapiHttpEmsmdb creates the PDH collector for the
// "MSExchange MapiHttp Emsmdb" performance object.
func (c *Collector) buildMapiHttpEmsmdb() error {
	counterNames := []string{
		activeUserCount,
	}

	collector, err := perfdata.NewCollector(perfdata.V1, "MSExchange MapiHttp Emsmdb", perfdata.AllInstances, counterNames)
	if err != nil {
		return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err)
	}

	c.perfDataCollectorMapiHttpEmsmdb = collector

	return nil
}
// collectMapiHttpEmsmdb emits the active-user-count gauge from the perflib
// snapshot in the scrape context.
func (c *Collector) collectMapiHttpEmsmdb(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var instances []perflibMapiHttpEmsmdb
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange MapiHttp Emsmdb"], &instances, logger); err != nil {
		return err
	}

	for _, instance := range instances {
		ch <- prometheus.MustNewConstMetric(c.activeUserCountMapiHttpEmsMDB, prometheus.GaugeValue, instance.ActiveUserCount)
	}

	return nil
}
// collectPDHMapiHttpEmsmdb emits the active-user-count gauge from the
// PDH-based collector.
func (c *Collector) collectPDHMapiHttpEmsmdb(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorMapiHttpEmsmdb.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchange MapiHttp Emsmdb metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchange MapiHttp Emsmdb returned empty result set")
	}

	for _, counters := range perfData {
		ch <- prometheus.MustNewConstMetric(c.activeUserCountMapiHttpEmsMDB, prometheus.GaugeValue, counters[activeUserCount].FirstValue)
	}

	return nil
}

View File

@@ -0,0 +1,88 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Counter names of the "MSExchange OWA" performance object. The shared
// requestsPerSec constant ("Requests/sec") is declared elsewhere in the
// package, hence the commented-out duplicate.
const (
currentUniqueUsers = "Current Unique Users"
// requestsPerSec = "Requests/sec"
)
// Perflib: [24618] MSExchange OWA.
//
// Fields are decoded from the perflib counter named in each struct tag.
type perflibOWA struct {
CurrentUniqueUsers float64 `perflib:"Current Unique Users"`
RequestsPerSec float64 `perflib:"Requests/sec"`
}
// buildOWA creates the PDH collector for the "MSExchange OWA" performance
// object.
func (c *Collector) buildOWA() error {
	counterNames := []string{
		currentUniqueUsers,
		requestsPerSec,
	}

	collector, err := perfdata.NewCollector(perfdata.V1, "MSExchange OWA", perfdata.AllInstances, counterNames)
	if err != nil {
		return fmt.Errorf("failed to create MSExchange OWA collector: %w", err)
	}

	c.perfDataCollectorOWA = collector

	return nil
}
// collectOWA emits the OWA unique-user gauge and requests/sec counter from
// the perflib snapshot in the scrape context.
func (c *Collector) collectOWA(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var instances []perflibOWA
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange OWA"], &instances, logger); err != nil {
		return err
	}

	for _, inst := range instances {
		ch <- prometheus.MustNewConstMetric(c.currentUniqueUsers, prometheus.GaugeValue, inst.CurrentUniqueUsers)
		ch <- prometheus.MustNewConstMetric(c.owaRequestsPerSec, prometheus.CounterValue, inst.RequestsPerSec)
	}

	return nil
}
// collectPDHOWA emits the OWA unique-user gauge and requests/sec counter from
// the PDH-based collector.
func (c *Collector) collectPDHOWA(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorOWA.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchange OWA metrics: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchange OWA returned empty result set")
	}

	for _, counters := range perfData {
		ch <- prometheus.MustNewConstMetric(c.currentUniqueUsers, prometheus.GaugeValue, counters[currentUniqueUsers].FirstValue)
		ch <- prometheus.MustNewConstMetric(c.owaRequestsPerSec, prometheus.CounterValue, counters[requestsPerSec].FirstValue)
	}

	return nil
}

View File

@@ -0,0 +1,140 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Counter names of the "MSExchange RpcClientAccess" performance object.
// activeUserCount ("Active User Count") is declared elsewhere in the package,
// hence the commented-out duplicate.
const (
rpcAveragedLatency = "RPC Averaged Latency"
rpcRequests = "RPC Requests"
// activeUserCount = "Active User Count"
connectionCount = "Connection Count"
rpcOperationsPerSec = "RPC Operations/sec"
userCount = "User Count"
)
// Perflib: [29366] MSExchange RpcClientAccess.
//
// Fields are decoded from the perflib counter named in each struct tag.
type perflibRPCClientAccess struct {
RPCAveragedLatency float64 `perflib:"RPC Averaged Latency"` // milliseconds; converted to seconds at emit time
RPCRequests float64 `perflib:"RPC Requests"`
ActiveUserCount float64 `perflib:"Active User Count"`
ConnectionCount float64 `perflib:"Connection Count"`
RPCOperationsPerSec float64 `perflib:"RPC Operations/sec"`
UserCount float64 `perflib:"User Count"`
}
// buildRPC creates the PDH collector for the "MSExchange RpcClientAccess"
// performance object.
func (c *Collector) buildRPC() error {
	counterNames := []string{
		rpcAveragedLatency,
		rpcRequests,
		activeUserCount,
		connectionCount,
		rpcOperationsPerSec,
		userCount,
	}

	collector, err := perfdata.NewCollector(perfdata.V1, "MSExchange RpcClientAccess", perfdata.AllInstances, counterNames)
	if err != nil {
		return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err)
	}

	c.perfDataCollectorRpcClientAccess = collector

	return nil
}
// collectRPC emits RpcClientAccess metrics from the perflib snapshot in the
// scrape context. The averaged latency is converted from milliseconds to
// seconds via msToSec.
func (c *Collector) collectRPC(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var instances []perflibRPCClientAccess
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange RpcClientAccess"], &instances, logger); err != nil {
		return err
	}

	for _, inst := range instances {
		for _, m := range []struct {
			desc  *prometheus.Desc
			vt    prometheus.ValueType
			value float64
		}{
			{c.rpcAveragedLatency, prometheus.GaugeValue, c.msToSec(inst.RPCAveragedLatency)},
			{c.rpcRequests, prometheus.GaugeValue, inst.RPCRequests},
			{c.activeUserCount, prometheus.GaugeValue, inst.ActiveUserCount},
			{c.connectionCount, prometheus.GaugeValue, inst.ConnectionCount},
			{c.rpcOperationsPerSec, prometheus.CounterValue, inst.RPCOperationsPerSec},
			{c.userCount, prometheus.GaugeValue, inst.UserCount},
		} {
			ch <- prometheus.MustNewConstMetric(m.desc, m.vt, m.value)
		}
	}

	return nil
}
// collectPDHRPC emits RpcClientAccess metrics from the PDH-based collector.
// The averaged latency is converted from milliseconds to seconds via msToSec.
func (c *Collector) collectPDHRPC(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorRpcClientAccess.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchange RpcClientAccess: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchange RpcClientAccess returned empty result set")
	}

	for _, counters := range perfData {
		for _, m := range []struct {
			desc  *prometheus.Desc
			vt    prometheus.ValueType
			value float64
		}{
			{c.rpcAveragedLatency, prometheus.GaugeValue, c.msToSec(counters[rpcAveragedLatency].FirstValue)},
			{c.rpcRequests, prometheus.GaugeValue, counters[rpcRequests].FirstValue},
			{c.activeUserCount, prometheus.GaugeValue, counters[activeUserCount].FirstValue},
			{c.connectionCount, prometheus.GaugeValue, counters[connectionCount].FirstValue},
			{c.rpcOperationsPerSec, prometheus.CounterValue, counters[rpcOperationsPerSec].FirstValue},
			{c.userCount, prometheus.GaugeValue, counters[userCount].FirstValue},
		} {
			ch <- prometheus.MustNewConstMetric(m.desc, m.vt, m.value)
		}
	}

	return nil
}

View File

@@ -3,8 +3,8 @@ package exchange_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/exchange" "github.com/prometheus-community/windows_exporter/internal/collector/exchange"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,191 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"strings"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Counter names of the "MSExchangeTransport Queues" performance object.
const (
externalActiveRemoteDeliveryQueueLength = "External Active Remote Delivery Queue Length"
internalActiveRemoteDeliveryQueueLength = "Internal Active Remote Delivery Queue Length"
activeMailboxDeliveryQueueLength = "Active Mailbox Delivery Queue Length"
retryMailboxDeliveryQueueLength = "Retry Mailbox Delivery Queue Length"
unreachableQueueLength = "Unreachable Queue Length"
externalLargestDeliveryQueueLength = "External Largest Delivery Queue Length"
internalLargestDeliveryQueueLength = "Internal Largest Delivery Queue Length"
poisonQueueLength = "Poison Queue Length"
)
// Perflib: [20524] MSExchangeTransport Queues.
//
// Fields are decoded from the perflib counter named in each struct tag.
type perflibTransportQueues struct {
Name string // queue instance name; converted with toLabelName and emitted as a metric label
ExternalActiveRemoteDeliveryQueueLength float64 `perflib:"External Active Remote Delivery Queue Length"`
InternalActiveRemoteDeliveryQueueLength float64 `perflib:"Internal Active Remote Delivery Queue Length"`
ActiveMailboxDeliveryQueueLength float64 `perflib:"Active Mailbox Delivery Queue Length"`
RetryMailboxDeliveryQueueLength float64 `perflib:"Retry Mailbox Delivery Queue Length"`
UnreachableQueueLength float64 `perflib:"Unreachable Queue Length"`
ExternalLargestDeliveryQueueLength float64 `perflib:"External Largest Delivery Queue Length"`
InternalLargestDeliveryQueueLength float64 `perflib:"Internal Largest Delivery Queue Length"`
PoisonQueueLength float64 `perflib:"Poison Queue Length"`
}
// buildTransportQueues creates the PDH collector for the
// "MSExchangeTransport Queues" performance object.
func (c *Collector) buildTransportQueues() error {
	counterNames := []string{
		externalActiveRemoteDeliveryQueueLength,
		internalActiveRemoteDeliveryQueueLength,
		activeMailboxDeliveryQueueLength,
		retryMailboxDeliveryQueueLength,
		unreachableQueueLength,
		externalLargestDeliveryQueueLength,
		internalLargestDeliveryQueueLength,
		poisonQueueLength,
	}

	collector, err := perfdata.NewCollector(perfdata.V1, "MSExchangeTransport Queues", perfdata.AllInstances, counterNames)
	if err != nil {
		return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err)
	}

	c.perfDataCollectorTransportQueues = collector

	return nil
}
// collectTransportQueues emits per-queue length gauges from the perflib
// snapshot in the scrape context.
func (c *Collector) collectTransportQueues(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var queues []perflibTransportQueues
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchangeTransport Queues"], &queues, logger); err != nil {
		return err
	}

	for _, q := range queues {
		label := c.toLabelName(q.Name)
		// Skip instances whose converted label ends in "_total".
		if strings.HasSuffix(label, "_total") {
			continue
		}

		for _, m := range []struct {
			desc  *prometheus.Desc
			value float64
		}{
			{c.externalActiveRemoteDeliveryQueueLength, q.ExternalActiveRemoteDeliveryQueueLength},
			{c.internalActiveRemoteDeliveryQueueLength, q.InternalActiveRemoteDeliveryQueueLength},
			{c.activeMailboxDeliveryQueueLength, q.ActiveMailboxDeliveryQueueLength},
			{c.retryMailboxDeliveryQueueLength, q.RetryMailboxDeliveryQueueLength},
			{c.unreachableQueueLength, q.UnreachableQueueLength},
			{c.externalLargestDeliveryQueueLength, q.ExternalLargestDeliveryQueueLength},
			{c.internalLargestDeliveryQueueLength, q.InternalLargestDeliveryQueueLength},
			{c.poisonQueueLength, q.PoisonQueueLength},
		} {
			ch <- prometheus.MustNewConstMetric(m.desc, prometheus.GaugeValue, m.value, label)
		}
	}

	return nil
}
// collectPDHTransportQueues emits per-queue length gauges from the PDH-based
// collector. Unlike the perflib variant, no instances are filtered out here.
func (c *Collector) collectPDHTransportQueues(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorTransportQueues.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchangeTransport Queues: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchangeTransport Queues returned empty result set")
	}

	for instanceName, counters := range perfData {
		label := c.toLabelName(instanceName)

		for _, m := range []struct {
			desc  *prometheus.Desc
			value float64
		}{
			{c.externalActiveRemoteDeliveryQueueLength, counters[externalActiveRemoteDeliveryQueueLength].FirstValue},
			{c.internalActiveRemoteDeliveryQueueLength, counters[internalActiveRemoteDeliveryQueueLength].FirstValue},
			{c.activeMailboxDeliveryQueueLength, counters[activeMailboxDeliveryQueueLength].FirstValue},
			{c.retryMailboxDeliveryQueueLength, counters[retryMailboxDeliveryQueueLength].FirstValue},
			{c.unreachableQueueLength, counters[unreachableQueueLength].FirstValue},
			{c.externalLargestDeliveryQueueLength, counters[externalLargestDeliveryQueueLength].FirstValue},
			{c.internalLargestDeliveryQueueLength, counters[internalLargestDeliveryQueueLength].FirstValue},
			{c.poisonQueueLength, counters[poisonQueueLength].FirstValue},
		} {
			ch <- prometheus.MustNewConstMetric(m.desc, prometheus.GaugeValue, m.value, label)
		}
	}

	return nil
}

View File

@@ -0,0 +1,146 @@
package exchange
import (
"errors"
"fmt"
"log/slog"
"strings"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Counter names of the "MSExchange WorkloadManagement Workloads" performance
// object.
const (
activeTasks = "ActiveTasks"
completedTasks = "CompletedTasks"
queuedTasks = "QueuedTasks"
yieldedTasks = "YieldedTasks"
isActive = "Active"
)
// Perflib: [19430] MSExchange WorkloadManagement Workloads.
//
// Fields are decoded from the perflib counter named in each struct tag.
type perflibWorkloadManagementWorkloads struct {
Name string // workload instance name; converted with toLabelName and emitted as a metric label
ActiveTasks float64 `perflib:"ActiveTasks"`
CompletedTasks float64 `perflib:"CompletedTasks"`
QueuedTasks float64 `perflib:"QueuedTasks"`
YieldedTasks float64 `perflib:"YieldedTasks"`
IsActive float64 `perflib:"Active"` // emitted as a gauge; presumably 0/1 — confirm against counter docs
}
// buildWorkloadManagementWorkloads creates the PDH collector for the
// "MSExchange WorkloadManagement Workloads" performance object.
func (c *Collector) buildWorkloadManagementWorkloads() error {
	counterNames := []string{
		activeTasks,
		completedTasks,
		queuedTasks,
		yieldedTasks,
		isActive,
	}

	collector, err := perfdata.NewCollector(perfdata.V1, "MSExchange WorkloadManagement Workloads", perfdata.AllInstances, counterNames)
	if err != nil {
		return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err)
	}

	c.perfDataCollectorWorkloadManagementWorkloads = collector

	return nil
}
// collectWorkloadManagementWorkloads emits per-workload task metrics from the
// perflib snapshot in the scrape context.
func (c *Collector) collectWorkloadManagementWorkloads(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var workloads []perflibWorkloadManagementWorkloads
	if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange WorkloadManagement Workloads"], &workloads, logger); err != nil {
		return err
	}

	for _, w := range workloads {
		label := c.toLabelName(w.Name)
		// Skip instances whose converted label ends in "_total".
		if strings.HasSuffix(label, "_total") {
			continue
		}

		for _, m := range []struct {
			desc  *prometheus.Desc
			vt    prometheus.ValueType
			value float64
		}{
			{c.activeTasks, prometheus.GaugeValue, w.ActiveTasks},
			{c.completedTasks, prometheus.CounterValue, w.CompletedTasks},
			{c.queuedTasks, prometheus.CounterValue, w.QueuedTasks},
			{c.yieldedTasks, prometheus.CounterValue, w.YieldedTasks},
			{c.isActive, prometheus.GaugeValue, w.IsActive},
		} {
			ch <- prometheus.MustNewConstMetric(m.desc, m.vt, m.value, label)
		}
	}

	return nil
}
// collectPDHWorkloadManagementWorkloads emits per-workload task metrics from
// the PDH-based collector. Unlike the perflib variant, no instances are
// filtered out here.
func (c *Collector) collectPDHWorkloadManagementWorkloads(ch chan<- prometheus.Metric) error {
	perfData, err := c.perfDataCollectorWorkloadManagementWorkloads.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect MSExchange WorkloadManagement Workloads: %w", err)
	}

	if len(perfData) == 0 {
		return errors.New("perflib query for MSExchange WorkloadManagement Workloads returned empty result set")
	}

	for instanceName, counters := range perfData {
		label := c.toLabelName(instanceName)

		for _, m := range []struct {
			desc  *prometheus.Desc
			vt    prometheus.ValueType
			value float64
		}{
			{c.activeTasks, prometheus.GaugeValue, counters[activeTasks].FirstValue},
			{c.completedTasks, prometheus.CounterValue, counters[completedTasks].FirstValue},
			{c.queuedTasks, prometheus.CounterValue, counters[queuedTasks].FirstValue},
			{c.yieldedTasks, prometheus.CounterValue, counters[yieldedTasks].FirstValue},
			{c.isActive, prometheus.GaugeValue, counters[isActive].FirstValue},
		} {
			ch <- prometheus.MustNewConstMetric(m.desc, m.vt, m.value, label)
		}
	}

	return nil
}

View File

@@ -12,19 +12,19 @@ import (
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/bmatcuk/doublestar/v4" "github.com/bmatcuk/doublestar/v4"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "filetime" const Name = "filetime"
type Config struct { type Config struct {
filePatterns []string FilePatterns []string
} }
var ConfigDefaults = Config{ var ConfigDefaults = Config{
filePatterns: []string{}, FilePatterns: []string{},
} }
// A Collector is a Prometheus Collector for collecting file times. // A Collector is a Prometheus Collector for collecting file times.
@@ -39,8 +39,8 @@ func New(config *Config) *Collector {
config = &ConfigDefaults config = &ConfigDefaults
} }
if config.filePatterns == nil { if config.FilePatterns == nil {
config.filePatterns = ConfigDefaults.filePatterns config.FilePatterns = ConfigDefaults.FilePatterns
} }
c := &Collector{ c := &Collector{
@@ -54,18 +54,18 @@ func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{ c := &Collector{
config: ConfigDefaults, config: ConfigDefaults,
} }
c.config.filePatterns = make([]string, 0) c.config.FilePatterns = make([]string, 0)
var filePatterns string var filePatterns string
app.Flag( app.Flag(
"collectors.filetime.file-patterns", "collector.filetime.file-patterns",
"Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive). See https://github.com/bmatcuk/doublestar#patterns", "Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive). See https://github.com/bmatcuk/doublestar#patterns",
).Default(strings.Join(ConfigDefaults.filePatterns, ",")).StringVar(&filePatterns) ).Default(strings.Join(ConfigDefaults.FilePatterns, ",")).StringVar(&filePatterns)
app.Action(func(*kingpin.ParseContext) error { app.Action(func(*kingpin.ParseContext) error {
// doublestar.Glob() requires forward slashes // doublestar.Glob() requires forward slashes
c.config.filePatterns = strings.Split(filepath.ToSlash(filePatterns), ",") c.config.FilePatterns = strings.Split(filepath.ToSlash(filePatterns), ",")
return nil return nil
}) })
@@ -85,7 +85,7 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger.Info("filetime collector is in an experimental state! It may subject to change.", logger.Info("filetime collector is in an experimental state! It may subject to change.",
slog.String("collector", Name), slog.String("collector", Name),
) )
@@ -97,7 +97,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
nil, nil,
) )
for _, filePattern := range c.config.filePatterns { for _, filePattern := range c.config.FilePatterns {
basePath, pattern := doublestar.SplitPattern(filePattern) basePath, pattern := doublestar.SplitPattern(filePattern)
_, err := doublestar.Glob(os.DirFS(basePath), pattern, doublestar.WithFilesOnly()) _, err := doublestar.Glob(os.DirFS(basePath), pattern, doublestar.WithFilesOnly())
@@ -121,7 +121,7 @@ func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan
func (c *Collector) collectGlob(logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectGlob(logger *slog.Logger, ch chan<- prometheus.Metric) error {
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
for _, filePattern := range c.config.filePatterns { for _, filePattern := range c.config.FilePatterns {
wg.Add(1) wg.Add(1)
go func(filePattern string) { go func(filePattern string) {

View File

@@ -0,0 +1,18 @@
package filetime_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/filetime"
"github.com/prometheus-community/windows_exporter/internal/testutils"
)
// BenchmarkCollector runs the shared collector benchmark harness against the
// filetime collector.
func BenchmarkCollector(b *testing.B) {
	newCollector := filetime.NewWithFlags
	testutils.FuncBenchmarkCollector(b, filetime.Name, newCollector)
}
// TestCollector runs the shared collector test harness against the filetime
// collector with a match-everything file pattern.
func TestCollector(t *testing.T) {
	cfg := &filetime.Config{
		FilePatterns: []string{"*.*"},
	}
	testutils.TestCollector(t, filetime.New, cfg)
}

View File

@@ -4,13 +4,14 @@ package fsrmquota
import ( import (
"errors" "errors"
"fmt"
"log/slog" "log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "fsrmquota" const Name = "fsrmquota"
@@ -21,7 +22,8 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
wmiClient *wmi.Client miSession *mi.Session
miQuery mi.Query
quotasCount *prometheus.Desc quotasCount *prometheus.Desc
peakUsage *prometheus.Desc peakUsage *prometheus.Desc
@@ -63,12 +65,18 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil { if miSession == nil {
return errors.New("wmiClient or SWbemServicesClient is nil") return errors.New("miSession is nil")
} }
c.wmiClient = wmiClient miQuery, err := mi.NewQuery("SELECT * FROM MSFT_FSRMQuota")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQuery = miQuery
c.miSession = miSession
c.quotasCount = prometheus.NewDesc( c.quotasCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "count"), prometheus.BuildFQName(types.Namespace, Name, "count"),
@@ -146,29 +154,28 @@ func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan
// MSFT_FSRMQuota docs: // MSFT_FSRMQuota docs:
// https://docs.microsoft.com/en-us/previous-versions/windows/desktop/fsrm/msft-fsrmquota // https://docs.microsoft.com/en-us/previous-versions/windows/desktop/fsrm/msft-fsrmquota
type MSFT_FSRMQuota struct { type MSFT_FSRMQuota struct {
Name string Name string `mi:"Name"`
Path string Path string `mi:"Path"`
PeakUsage uint64 PeakUsage uint64 `mi:"PeakUsage"`
Size uint64 Size uint64 `mi:"Size"`
Usage uint64 Usage uint64 `mi:"Usage"`
Description string Description string `mi:"Description"`
Template string Template string `mi:"Template"`
// Threshold string // Threshold string `mi:"Threshold"`
Disabled bool Disabled bool `mi:"Disabled"`
MatchesTemplate bool MatchesTemplate bool `mi:"MatchesTemplate"`
SoftLimit bool SoftLimit bool `mi:"SoftLimit"`
} }
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []MSFT_FSRMQuota var dst []MSFT_FSRMQuota
if err := c.miSession.Query(&dst, mi.NamespaceRootWindowsFSRM, c.miQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}
var count int var count int
if err := c.wmiClient.Query("SELECT * FROM MSFT_FSRMQuota", &dst, nil, "root/microsoft/windows/fsrm"); err != nil {
return err
}
for _, quota := range dst { for _, quota := range dst {
count++ count++
path := quota.Path path := quota.Path

View File

@@ -3,8 +3,8 @@ package fsrmquota_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/fsrmquota" "github.com/prometheus-community/windows_exporter/internal/collector/fsrmquota"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -9,9 +9,10 @@ import (
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "hyperv" const Name = "hyperv"
@@ -23,7 +24,7 @@ var ConfigDefaults = Config{}
// Collector is a Prometheus Collector for hyper-v. // Collector is a Prometheus Collector for hyper-v.
type Collector struct { type Collector struct {
config Config config Config
wmiClient *wmi.Client miSession *mi.Session
// Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary // Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
healthCritical *prometheus.Desc healthCritical *prometheus.Desc
@@ -168,12 +169,12 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil { if miSession == nil {
return errors.New("wmiClient or SWbemServicesClient is nil") return errors.New("miSession is nil")
} }
c.wmiClient = wmiClient c.miSession = miSession
buildSubsystemName := func(component string) string { return "hyperv_" + component } buildSubsystemName := func(component string) string { return "hyperv_" + component }
@@ -858,14 +859,14 @@ func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan
// Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary vm health status. // Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary vm health status.
type Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary struct { type Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary struct {
HealthCritical uint32 HealthCritical uint32 `mi:"HealthCritical"`
HealthOk uint32 HealthOk uint32 `mi:"HealthOK"`
} }
func (c *Collector) collectVmHealth(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmHealth(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary var dst []Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, health := range dst { for _, health := range dst {
@@ -887,16 +888,16 @@ func (c *Collector) collectVmHealth(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition ..,. // Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition ..,.
type Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition struct { type Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition struct {
Name string Name string `mi:"Name"`
PhysicalPagesAllocated uint64 PhysicalPagesAllocated uint64 `mi:"PhysicalPagesAllocated"`
PreferredNUMANodeIndex uint64 PreferredNUMANodeIndex uint64 `mi:"PreferredNUMANodeIndex"`
RemotePhysicalPages uint64 RemotePhysicalPages uint64 `mi:"RemotePhysicalPages"`
} }
func (c *Collector) collectVmVid(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmVid(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition var dst []Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, page := range dst { for _, page := range dst {
@@ -931,34 +932,34 @@ func (c *Collector) collectVmVid(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition ... // Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition ...
type Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition struct { type Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition struct {
Name string Name string `mi:"Name"`
AddressSpaces uint64 AddressSpaces uint64 `mi:"AddressSpaces"`
AttachedDevices uint64 AttachedDevices uint64 `mi:"AttachedDevices"`
DepositedPages uint64 DepositedPages uint64 `mi:"DepositedPages"`
DeviceDMAErrors uint64 DeviceDMAErrors uint64 `mi:"DeviceDMAErrors"`
DeviceInterruptErrors uint64 DeviceInterruptErrors uint64 `mi:"DeviceInterruptErrors"`
DeviceInterruptMappings uint64 DeviceInterruptMappings uint64 `mi:"DeviceInterruptMappings"`
DeviceInterruptThrottleEvents uint64 DeviceInterruptThrottleEvents uint64 `mi:"DeviceInterruptThrottleEvents"`
GPAPages uint64 GPAPages uint64 `mi:"GPAPages"`
GPASpaceModificationsPersec uint64 GPASpaceModificationsPersec uint64 `mi:"GPASpaceModificationsPersec"`
IOTLBFlushCost uint64 IOTLBFlushCost uint64 `mi:"IOTLBFlushCost"`
IOTLBFlushesPersec uint64 IOTLBFlushesPersec uint64 `mi:"IOTLBFlushesPersec"`
RecommendedVirtualTLBSize uint64 RecommendedVirtualTLBSize uint64 `mi:"RecommendedVirtualTLBSize"`
SkippedTimerTicks uint64 SkippedTimerTicks uint64 `mi:"SkippedTimerTicks"`
Value1Gdevicepages uint64 Value1Gdevicepages uint64 `mi:"Value1Gdevicepages"`
Value1GGPApages uint64 Value1GGPApages uint64 `mi:"Value1GGPApages"`
Value2Mdevicepages uint64 Value2Mdevicepages uint64 `mi:"Value2Mdevicepages"`
Value2MGPApages uint64 Value2MGPApages uint64 `mi:"Value2MGPApages"`
Value4Kdevicepages uint64 Value4Kdevicepages uint64 `mi:"Value4Kdevicepages"`
Value4KGPApages uint64 Value4KGPApages uint64 `mi:"Value4KGPApages"`
VirtualTLBFlushEntiresPersec uint64 VirtualTLBFlushEntiresPersec uint64 `mi:"VirtualTLBFlushEntiresPersec"`
VirtualTLBPages uint64 VirtualTLBPages uint64 `mi:"VirtualTLBPages"`
} }
func (c *Collector) collectVmHv(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmHv(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {
@@ -1087,14 +1088,14 @@ func (c *Collector) collectVmHv(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_HvStats_HyperVHypervisor ... // Win32_PerfRawData_HvStats_HyperVHypervisor ...
type Win32_PerfRawData_HvStats_HyperVHypervisor struct { type Win32_PerfRawData_HvStats_HyperVHypervisor struct {
LogicalProcessors uint64 LogicalProcessors uint64 `mi:"LogicalProcessors"`
VirtualProcessors uint64 VirtualProcessors uint64 `mi:"VirtualProcessors"`
} }
func (c *Collector) collectVmProcessor(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmProcessor(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisor var dst []Win32_PerfRawData_HvStats_HyperVHypervisor
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisor", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_HvStats_HyperVHypervisor"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {
@@ -1116,16 +1117,16 @@ func (c *Collector) collectVmProcessor(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor ... // Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor ...
type Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor struct { type Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor struct {
Name string Name string `mi:"Name"`
PercentGuestRunTime uint64 PercentGuestRunTime uint64 `mi:"PercentGuestRunTime"`
PercentHypervisorRunTime uint64 PercentHypervisorRunTime uint64 `mi:"PercentHypervisorRunTime"`
PercentTotalRunTime uint PercentTotalRunTime uint64 `mi:"PercentTotalRunTime"`
} }
func (c *Collector) collectHostLPUsage(logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectHostLPUsage(logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor var dst []Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {
@@ -1170,18 +1171,18 @@ func (c *Collector) collectHostLPUsage(logger *slog.Logger, ch chan<- prometheus
// Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor ... // Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor ...
type Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor struct { type Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor struct {
Name string Name string `mi:"Name"`
PercentGuestRunTime uint64 PercentGuestRunTime uint64 `mi:"PercentGuestRunTime"`
PercentHypervisorRunTime uint64 PercentHypervisorRunTime uint64 `mi:"PercentHypervisorRunTime"`
PercentRemoteRunTime uint64 PercentRemoteRunTime uint64 `mi:"PercentRemoteRunTime"`
PercentTotalRunTime uint64 PercentTotalRunTime uint64 `mi:"PercentTotalRunTime"`
CPUWaitTimePerDispatch uint64 CPUWaitTimePerDispatch uint64 `mi:"CPUWaitTimePerDispatch"`
} }
func (c *Collector) collectHostCpuUsage(logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectHostCpuUsage(logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {
@@ -1240,18 +1241,18 @@ func (c *Collector) collectHostCpuUsage(logger *slog.Logger, ch chan<- prometheu
// Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor ... // Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor ...
type Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor struct { type Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor struct {
Name string Name string `mi:"Name"`
PercentGuestRunTime uint64 PercentGuestRunTime uint64 `mi:"PercentGuestRunTime"`
PercentHypervisorRunTime uint64 PercentHypervisorRunTime uint64 `mi:"PercentHypervisorRunTime"`
PercentRemoteRunTime uint64 PercentRemoteRunTime uint64 `mi:"PercentRemoteRunTime"`
PercentTotalRunTime uint64 PercentTotalRunTime uint64 `mi:"PercentTotalRunTime"`
CPUWaitTimePerDispatch uint64 CPUWaitTimePerDispatch uint64 `mi:"CPUWaitTimePerDispatch"`
} }
func (c *Collector) collectVmCpuUsage(logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectVmCpuUsage(logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor var dst []Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {
@@ -1318,37 +1319,37 @@ func (c *Collector) collectVmCpuUsage(logger *slog.Logger, ch chan<- prometheus.
// Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch ... // Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch ...
type Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch struct { type Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch struct {
Name string Name string `mi:"Name"`
BroadcastPacketsReceivedPersec uint64 BroadcastPacketsReceivedPersec uint64 `mi:"BroadcastPacketsReceivedPersec"`
BroadcastPacketsSentPersec uint64 BroadcastPacketsSentPersec uint64 `mi:"BroadcastPacketsSentPersec"`
BytesPersec uint64 BytesPersec uint64 `mi:"BytesPersec"`
BytesReceivedPersec uint64 BytesReceivedPersec uint64 `mi:"BytesReceivedPersec"`
BytesSentPersec uint64 BytesSentPersec uint64 `mi:"BytesSentPersec"`
DirectedPacketsReceivedPersec uint64 DirectedPacketsReceivedPersec uint64 `mi:"DirectedPacketsReceivedPersec"`
DirectedPacketsSentPersec uint64 DirectedPacketsSentPersec uint64 `mi:"DirectedPacketsSentPersec"`
DroppedPacketsIncomingPersec uint64 DroppedPacketsIncomingPersec uint64 `mi:"DroppedPacketsIncomingPersec"`
DroppedPacketsOutgoingPersec uint64 DroppedPacketsOutgoingPersec uint64 `mi:"DroppedPacketsOutgoingPersec"`
ExtensionsDroppedPacketsIncomingPersec uint64 ExtensionsDroppedPacketsIncomingPersec uint64 `mi:"ExtensionsDroppedPacketsIncomingPersec"`
ExtensionsDroppedPacketsOutgoingPersec uint64 ExtensionsDroppedPacketsOutgoingPersec uint64 `mi:"ExtensionsDroppedPacketsOutgoingPersec"`
LearnedMacAddresses uint64 LearnedMacAddresses uint64 `mi:"LearnedMacAddresses"`
LearnedMacAddressesPersec uint64 LearnedMacAddressesPersec uint64 `mi:"LearnedMacAddressesPersec"`
MulticastPacketsReceivedPersec uint64 MulticastPacketsReceivedPersec uint64 `mi:"MulticastPacketsReceivedPersec"`
MulticastPacketsSentPersec uint64 MulticastPacketsSentPersec uint64 `mi:"MulticastPacketsSentPersec"`
NumberofSendChannelMovesPersec uint64 NumberofSendChannelMovesPersec uint64 `mi:"NumberofSendChannelMovesPersec"`
NumberofVMQMovesPersec uint64 NumberofVMQMovesPersec uint64 `mi:"NumberofVMQMovesPersec"`
PacketsFlooded uint64 PacketsFlooded uint64 `mi:"PacketsFlooded"`
PacketsFloodedPersec uint64 PacketsFloodedPersec uint64 `mi:"PacketsFloodedPersec"`
PacketsPersec uint64 PacketsPersec uint64 `mi:"PacketsPersec"`
PacketsReceivedPersec uint64 PacketsReceivedPersec uint64 `mi:"PacketsReceivedPersec"`
PacketsSentPersec uint64 PacketsSentPersec uint64 `mi:"PacketsSentPersec"`
PurgedMacAddresses uint64 PurgedMacAddresses uint64 `mi:"PurgedMacAddresses"`
PurgedMacAddressesPersec uint64 PurgedMacAddressesPersec uint64 `mi:"PurgedMacAddressesPersec"`
} }
func (c *Collector) collectVmSwitch(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmSwitch(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch var dst []Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {
@@ -1500,19 +1501,19 @@ func (c *Collector) collectVmSwitch(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter ... // Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter ...
type Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter struct { type Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter struct {
Name string Name string `mi:"Name"`
BytesDropped uint64 BytesDropped uint64 `mi:"BytesDropped"`
BytesReceivedPersec uint64 BytesReceivedPersec uint64 `mi:"BytesReceivedPersec"`
BytesSentPersec uint64 BytesSentPersec uint64 `mi:"BytesSentPersec"`
FramesDropped uint64 FramesDropped uint64 `mi:"FramesDropped"`
FramesReceivedPersec uint64 FramesReceivedPersec uint64 `mi:"FramesReceivedPersec"`
FramesSentPersec uint64 FramesSentPersec uint64 `mi:"FramesSentPersec"`
} }
func (c *Collector) collectVmEthernet(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmEthernet(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter var dst []Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {
@@ -1568,19 +1569,19 @@ func (c *Collector) collectVmEthernet(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_Counters_HyperVVirtualStorageDevice ... // Win32_PerfRawData_Counters_HyperVVirtualStorageDevice ...
type Win32_PerfRawData_Counters_HyperVVirtualStorageDevice struct { type Win32_PerfRawData_Counters_HyperVVirtualStorageDevice struct {
Name string Name string `mi:"Name"`
ErrorCount uint64 ErrorCount uint64 `mi:"ErrorCount"`
QueueLength uint32 QueueLength uint32 `mi:"QueueLength"`
ReadBytesPersec uint64 ReadBytesPersec uint64 `mi:"ReadBytesPersec"`
ReadOperationsPerSec uint64 ReadOperationsPerSec uint64 `mi:"ReadOperationsPerSec"`
WriteBytesPersec uint64 WriteBytesPersec uint64 `mi:"WriteBytesPersec"`
WriteOperationsPerSec uint64 WriteOperationsPerSec uint64 `mi:"WriteOperationsPerSec"`
} }
func (c *Collector) collectVmStorage(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmStorage(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_Counters_HyperVVirtualStorageDevice var dst []Win32_PerfRawData_Counters_HyperVVirtualStorageDevice
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_Counters_HyperVVirtualStorageDevice", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_Counters_HyperVVirtualStorageDevice"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {
@@ -1636,19 +1637,19 @@ func (c *Collector) collectVmStorage(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter ... // Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter ...
type Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter struct { type Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter struct {
Name string Name string `mi:"Name"`
BytesReceivedPersec uint64 BytesReceivedPersec uint64 `mi:"BytesReceivedPersec"`
BytesSentPersec uint64 BytesSentPersec uint64 `mi:"BytesSentPersec"`
DroppedPacketsIncomingPersec uint64 DroppedPacketsIncomingPersec uint64 `mi:"DroppedPacketsIncomingPersec"`
DroppedPacketsOutgoingPersec uint64 DroppedPacketsOutgoingPersec uint64 `mi:"DroppedPacketsOutgoingPersec"`
PacketsReceivedPersec uint64 PacketsReceivedPersec uint64 `mi:"PacketsReceivedPersec"`
PacketsSentPersec uint64 PacketsSentPersec uint64 `mi:"PacketsSentPersec"`
} }
func (c *Collector) collectVmNetwork(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmNetwork(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter var dst []Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {
@@ -1704,23 +1705,23 @@ func (c *Collector) collectVmNetwork(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM ... // Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM ...
type Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM struct { type Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM struct {
Name string Name string `mi:"Name"`
AddedMemory uint64 AddedMemory uint64 `mi:"AddedMemory"`
AveragePressure uint64 AveragePressure uint64 `mi:"AveragePressure"`
CurrentPressure uint64 CurrentPressure uint64 `mi:"CurrentPressure"`
GuestVisiblePhysicalMemory uint64 GuestVisiblePhysicalMemory uint64 `mi:"GuestVisiblePhysicalMemory"`
MaximumPressure uint64 MaximumPressure uint64 `mi:"MaximumPressure"`
MemoryAddOperations uint64 MemoryAddOperations uint64 `mi:"MemoryAddOperations"`
MemoryRemoveOperations uint64 MemoryRemoveOperations uint64 `mi:"MemoryRemoveOperations"`
MinimumPressure uint64 MinimumPressure uint64 `mi:"MinimumPressure"`
PhysicalMemory uint64 PhysicalMemory uint64 `mi:"PhysicalMemory"`
RemovedMemory uint64 RemovedMemory uint64 `mi:"RemovedMemory"`
} }
func (c *Collector) collectVmMemory(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmMemory(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM var dst []Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, obj := range dst { for _, obj := range dst {

View File

@@ -3,8 +3,8 @@ package hyperv_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv" "github.com/prometheus-community/windows_exporter/internal/collector/hyperv"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -10,10 +10,10 @@ import (
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows/registry" "golang.org/x/sys/windows/registry"
) )
@@ -199,22 +199,22 @@ func NewWithFlags(app *kingpin.Application) *Collector {
app.Flag( app.Flag(
"collector.iis.app-exclude", "collector.iis.app-exclude",
"Regexp of apps to exclude. App name must both match include and not match exclude to be included.", "Regexp of apps to exclude. App name must both match include and not match exclude to be included.",
).Default(c.config.AppExclude.String()).StringVar(&appExclude) ).Default("").StringVar(&appExclude)
app.Flag( app.Flag(
"collector.iis.app-include", "collector.iis.app-include",
"Regexp of apps to include. App name must both match include and not match exclude to be included.", "Regexp of apps to include. App name must both match include and not match exclude to be included.",
).Default(c.config.AppInclude.String()).StringVar(&appInclude) ).Default(".+").StringVar(&appInclude)
app.Flag( app.Flag(
"collector.iis.site-exclude", "collector.iis.site-exclude",
"Regexp of sites to exclude. Site name must both match include and not match exclude to be included.", "Regexp of sites to exclude. Site name must both match include and not match exclude to be included.",
).Default(c.config.SiteExclude.String()).StringVar(&siteExclude) ).Default("").StringVar(&siteExclude)
app.Flag( app.Flag(
"collector.iis.site-include", "collector.iis.site-include",
"Regexp of sites to include. Site name must both match include and not match exclude to be included.", "Regexp of sites to include. Site name must both match include and not match exclude to be included.",
).Default(c.config.SiteInclude.String()).StringVar(&siteInclude) ).Default(".+").StringVar(&siteInclude)
app.Action(func(*kingpin.ParseContext) error { app.Action(func(*kingpin.ParseContext) error {
var err error var err error
@@ -262,7 +262,7 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
c.iisVersion = getIISVersion(logger) c.iisVersion = getIISVersion(logger)
@@ -1073,7 +1073,7 @@ func (c *Collector) collectWebService(ctx *types.ScrapeContext, logger *slog.Log
var webService []perflibWebService var webService []perflibWebService
if err := perflib.UnmarshalObject(ctx.PerfObjects["Web Service"], &webService, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["Web Service"], &webService, logger); err != nil {
return err return err
} }
@@ -1368,7 +1368,7 @@ func (c *Collector) collectAPP_POOL_WAS(ctx *types.ScrapeContext, logger *slog.L
var APP_POOL_WAS []perflibAPP_POOL_WAS var APP_POOL_WAS []perflibAPP_POOL_WAS
if err := perflib.UnmarshalObject(ctx.PerfObjects["APP_POOL_WAS"], &APP_POOL_WAS, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["APP_POOL_WAS"], &APP_POOL_WAS, logger); err != nil {
return err return err
} }
@@ -1548,7 +1548,7 @@ func (c *Collector) collectW3SVC_W3WP(ctx *types.ScrapeContext, logger *slog.Log
var W3SVC_W3WP []perflibW3SVC_W3WP var W3SVC_W3WP []perflibW3SVC_W3WP
if err := perflib.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP, logger); err != nil {
return err return err
} }
@@ -1807,7 +1807,7 @@ func (c *Collector) collectW3SVC_W3WP(ctx *types.ScrapeContext, logger *slog.Log
if c.iisVersion.major >= 8 { if c.iisVersion.major >= 8 {
var W3SVC_W3WP_IIS8 []perflibW3SVC_W3WP_IIS8 var W3SVC_W3WP_IIS8 []perflibW3SVC_W3WP_IIS8
if err := perflib.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP_IIS8, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP_IIS8, logger); err != nil {
return err return err
} }
@@ -1950,7 +1950,7 @@ func (c *Collector) collectWebServiceCache(ctx *types.ScrapeContext, logger *slo
var WebServiceCache []perflibWebServiceCache var WebServiceCache []perflibWebServiceCache
if err := perflib.UnmarshalObject(ctx.PerfObjects["Web Service Cache"], &WebServiceCache, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["Web Service Cache"], &WebServiceCache, logger); err != nil {
return err return err
} }

View File

@@ -3,8 +3,8 @@ package iis_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/iis" "github.com/prometheus-community/windows_exporter/internal/collector/iis"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -6,10 +6,10 @@ import (
"log/slog" "log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/headers/slc" "github.com/prometheus-community/windows_exporter/internal/headers/slc"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "license" const Name = "license"
@@ -61,7 +61,7 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
c.licenseStatus = prometheus.NewDesc( c.licenseStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "status"), prometheus.BuildFQName(types.Namespace, Name, "status"),
"Status of windows license", "Status of windows license",

View File

@@ -0,0 +1,16 @@
package license_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/license"
"github.com/prometheus-community/windows_exporter/internal/testutils"
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, license.Name, license.NewWithFlags)
}
func TestCollector(t *testing.T) {
testutils.TestCollector(t, license.New, nil)
}

View File

@@ -0,0 +1,43 @@
package logical_disk
const (
avgDiskReadQueueLength = "Avg. Disk Read Queue Length"
avgDiskSecPerRead = "Avg. Disk sec/Read"
avgDiskSecPerTransfer = "Avg. Disk sec/Transfer"
avgDiskSecPerWrite = "Avg. Disk sec/Write"
avgDiskWriteQueueLength = "Avg. Disk Write Queue Length"
currentDiskQueueLength = "Current Disk Queue Length"
freeSpace = "Free Megabytes"
diskReadBytesPerSec = "Disk Read Bytes/sec"
diskReadsPerSec = "Disk Reads/sec"
diskWriteBytesPerSec = "Disk Write Bytes/sec"
diskWritesPerSec = "Disk Writes/sec"
percentDiskReadTime = "% Disk Read Time"
percentDiskWriteTime = "% Disk Write Time"
percentFreeSpace = "% Free Space"
percentIdleTime = "% Idle Time"
SplitIOPerSec = "Split IO/Sec"
)
// Win32_PerfRawData_PerfDisk_LogicalDisk docs:
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference.
type logicalDisk struct {
Name string
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
AvgDiskReadQueueLength float64 `perflib:"Avg. Disk Read Queue Length"`
AvgDiskWriteQueueLength float64 `perflib:"Avg. Disk Write Queue Length"`
DiskReadBytesPerSec float64 `perflib:"Disk Read Bytes/sec"`
DiskReadsPerSec float64 `perflib:"Disk Reads/sec"`
DiskWriteBytesPerSec float64 `perflib:"Disk Write Bytes/sec"`
DiskWritesPerSec float64 `perflib:"Disk Writes/sec"`
PercentDiskReadTime float64 `perflib:"% Disk Read Time"`
PercentDiskWriteTime float64 `perflib:"% Disk Write Time"`
PercentFreeSpace float64 `perflib:"% Free Space_Base"`
PercentFreeSpace_Base float64 `perflib:"Free Megabytes"`
PercentIdleTime float64 `perflib:"% Idle Time"`
SplitIOPerSec float64 `perflib:"Split IO/Sec"`
AvgDiskSecPerRead float64 `perflib:"Avg. Disk sec/Read"`
AvgDiskSecPerWrite float64 `perflib:"Avg. Disk sec/Write"`
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
}

View File

@@ -4,6 +4,7 @@ package logical_disk
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"log/slog" "log/slog"
"regexp" "regexp"
@@ -12,10 +13,13 @@ import (
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
) )
@@ -35,6 +39,8 @@ var ConfigDefaults = Config{
type Collector struct { type Collector struct {
config Config config Config
perfDataCollector perfdata.Collector
avgReadQueue *prometheus.Desc avgReadQueue *prometheus.Desc
avgWriteQueue *prometheus.Desc avgWriteQueue *prometheus.Desc
freeSpace *prometheus.Desc freeSpace *prometheus.Desc
@@ -93,12 +99,12 @@ func NewWithFlags(app *kingpin.Application) *Collector {
app.Flag( app.Flag(
"collector.logical_disk.volume-exclude", "collector.logical_disk.volume-exclude",
"Regexp of volumes to exclude. Volume name must both match include and not match exclude to be included.", "Regexp of volumes to exclude. Volume name must both match include and not match exclude to be included.",
).Default(c.config.VolumeExclude.String()).StringVar(&volumeExclude) ).Default("").StringVar(&volumeExclude)
app.Flag( app.Flag(
"collector.logical_disk.volume-include", "collector.logical_disk.volume-include",
"Regexp of volumes to include. Volume name must both match include and not match exclude to be included.", "Regexp of volumes to include. Volume name must both match include and not match exclude to be included.",
).Default(c.config.VolumeInclude.String()).StringVar(&volumeInclude) ).Default(".+").StringVar(&volumeInclude)
app.Action(func(*kingpin.ParseContext) error { app.Action(func(*kingpin.ParseContext) error {
var err error var err error
@@ -124,6 +130,10 @@ func (c *Collector) GetName() string {
} }
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) { func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if utils.PDHEnabled() {
return []string{}, nil
}
return []string{"LogicalDisk"}, nil return []string{"LogicalDisk"}, nil
} }
@@ -131,7 +141,35 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if utils.PDHEnabled() {
counters := []string{
currentDiskQueueLength,
avgDiskReadQueueLength,
avgDiskWriteQueueLength,
diskReadBytesPerSec,
diskReadsPerSec,
diskWriteBytesPerSec,
diskWritesPerSec,
percentDiskReadTime,
percentDiskWriteTime,
percentFreeSpace,
freeSpace,
percentIdleTime,
SplitIOPerSec,
avgDiskSecPerRead,
avgDiskSecPerWrite,
avgDiskSecPerTransfer,
}
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "LogicalDisk", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
}
}
c.information = prometheus.NewDesc( c.information = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"), prometheus.BuildFQName(types.Namespace, Name, "info"),
"A metric with a constant '1' value labeled with logical disk information", "A metric with a constant '1' value labeled with logical disk information",
@@ -263,6 +301,11 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
if utils.PDHEnabled() {
return c.collectPDH(logger, ch)
}
if err := c.collect(ctx, logger, ch); err != nil { if err := c.collect(ctx, logger, ch); err != nil {
logger.Error("failed collecting logical_disk metrics", logger.Error("failed collecting logical_disk metrics",
slog.Any("err", err), slog.Any("err", err),
@@ -274,32 +317,172 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
return nil return nil
} }
// Win32_PerfRawData_PerfDisk_LogicalDisk docs: func (c *Collector) collectPDH(logger *slog.Logger, ch chan<- prometheus.Metric) error {
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class var (
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference. err error
type logicalDisk struct { diskID string
Name string info volumeInfo
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"` )
AvgDiskReadQueueLength float64 `perflib:"Avg. Disk Read Queue Length"`
AvgDiskWriteQueueLength float64 `perflib:"Avg. Disk Write Queue Length"` perfData, err := c.perfDataCollector.Collect()
DiskReadBytesPerSec float64 `perflib:"Disk Read Bytes/sec"` if err != nil {
DiskReadsPerSec float64 `perflib:"Disk Reads/sec"` return fmt.Errorf("failed to collect LogicalDisk metrics: %w", err)
DiskWriteBytesPerSec float64 `perflib:"Disk Write Bytes/sec"` }
DiskWritesPerSec float64 `perflib:"Disk Writes/sec"`
PercentDiskReadTime float64 `perflib:"% Disk Read Time"` if len(perfData) == 0 {
PercentDiskWriteTime float64 `perflib:"% Disk Write Time"` return errors.New("perflib query for LogicalDisk returned empty result set")
PercentFreeSpace float64 `perflib:"% Free Space_Base"` }
PercentFreeSpace_Base float64 `perflib:"Free Megabytes"`
PercentIdleTime float64 `perflib:"% Idle Time"` for name, volume := range perfData {
SplitIOPerSec float64 `perflib:"Split IO/Sec"` if name == "_Total" ||
AvgDiskSecPerRead float64 `perflib:"Avg. Disk sec/Read"` c.config.VolumeExclude.MatchString(name) ||
AvgDiskSecPerWrite float64 `perflib:"Avg. Disk sec/Write"` !c.config.VolumeInclude.MatchString(name) {
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"` continue
}
diskID, err = getDiskIDByVolume(name)
if err != nil {
logger.Warn("failed to get disk ID for "+name,
slog.Any("err", err),
)
}
info, err = getVolumeInfo(name)
if err != nil {
logger.Warn("failed to get volume information for "+name,
slog.Any("err", err),
)
}
ch <- prometheus.MustNewConstMetric(
c.information,
prometheus.GaugeValue,
1,
diskID,
info.volumeType,
name,
info.label,
info.filesystem,
info.serialNumber,
)
ch <- prometheus.MustNewConstMetric(
c.requestsQueued,
prometheus.GaugeValue,
volume[currentDiskQueueLength].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.avgReadQueue,
prometheus.GaugeValue,
volume[avgDiskReadQueueLength].FirstValue*perftypes.TicksToSecondScaleFactor,
name,
)
ch <- prometheus.MustNewConstMetric(
c.avgWriteQueue,
prometheus.GaugeValue,
volume[avgDiskWriteQueueLength].FirstValue*perftypes.TicksToSecondScaleFactor,
name,
)
ch <- prometheus.MustNewConstMetric(
c.readBytesTotal,
prometheus.CounterValue,
volume[diskReadBytesPerSec].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.readsTotal,
prometheus.CounterValue,
volume[diskReadsPerSec].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.writeBytesTotal,
prometheus.CounterValue,
volume[diskWriteBytesPerSec].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.writesTotal,
prometheus.CounterValue,
volume[diskWritesPerSec].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.readTime,
prometheus.CounterValue,
volume[percentDiskReadTime].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.writeTime,
prometheus.CounterValue,
volume[percentDiskWriteTime].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.freeSpace,
prometheus.GaugeValue,
volume[freeSpace].FirstValue*1024*1024,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalSpace,
prometheus.GaugeValue,
volume[percentFreeSpace].FirstValue*1024*1024,
name,
)
ch <- prometheus.MustNewConstMetric(
c.idleTime,
prometheus.CounterValue,
volume[percentIdleTime].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.splitIOs,
prometheus.CounterValue,
volume[SplitIOPerSec].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.readLatency,
prometheus.CounterValue,
volume[avgDiskSecPerRead].FirstValue*perftypes.TicksToSecondScaleFactor,
name,
)
ch <- prometheus.MustNewConstMetric(
c.writeLatency,
prometheus.CounterValue,
volume[avgDiskSecPerWrite].FirstValue*perftypes.TicksToSecondScaleFactor,
name,
)
ch <- prometheus.MustNewConstMetric(
c.readWriteLatency,
prometheus.CounterValue,
volume[avgDiskSecPerTransfer].FirstValue*perftypes.TicksToSecondScaleFactor,
name,
)
}
return nil
} }
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
var ( var (
err error err error
diskID string diskID string
@@ -307,7 +490,7 @@ func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
dst []logicalDisk dst []logicalDisk
) )
if err = perflib.UnmarshalObject(ctx.PerfObjects["LogicalDisk"], &dst, logger); err != nil { if err = v1.UnmarshalObject(ctx.PerfObjects["LogicalDisk"], &dst, logger); err != nil {
return err return err
} }
@@ -354,14 +537,14 @@ func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.avgReadQueue, c.avgReadQueue,
prometheus.GaugeValue, prometheus.GaugeValue,
volume.AvgDiskReadQueueLength*perflib.TicksToSecondScaleFactor, volume.AvgDiskReadQueueLength*perftypes.TicksToSecondScaleFactor,
volume.Name, volume.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.avgWriteQueue, c.avgWriteQueue,
prometheus.GaugeValue, prometheus.GaugeValue,
volume.AvgDiskWriteQueueLength*perflib.TicksToSecondScaleFactor, volume.AvgDiskWriteQueueLength*perftypes.TicksToSecondScaleFactor,
volume.Name, volume.Name,
) )
@@ -438,21 +621,21 @@ func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.readLatency, c.readLatency,
prometheus.CounterValue, prometheus.CounterValue,
volume.AvgDiskSecPerRead*perflib.TicksToSecondScaleFactor, volume.AvgDiskSecPerRead*perftypes.TicksToSecondScaleFactor,
volume.Name, volume.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.writeLatency, c.writeLatency,
prometheus.CounterValue, prometheus.CounterValue,
volume.AvgDiskSecPerWrite*perflib.TicksToSecondScaleFactor, volume.AvgDiskSecPerWrite*perftypes.TicksToSecondScaleFactor,
volume.Name, volume.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.readWriteLatency, c.readWriteLatency,
prometheus.CounterValue, prometheus.CounterValue,
volume.AvgDiskSecPerTransfer*perflib.TicksToSecondScaleFactor, volume.AvgDiskSecPerTransfer*perftypes.TicksToSecondScaleFactor,
volume.Name, volume.Name,
) )
} }

View File

@@ -4,8 +4,9 @@ import (
"testing" "testing"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk" "github.com/prometheus-community/windows_exporter/internal/collector/logical_disk"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/types"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {
@@ -14,3 +15,9 @@ func BenchmarkCollector(b *testing.B) {
kingpin.CommandLine.GetArg("collector.logical_disk.volume-include").StringVar(&localVolumeInclude) kingpin.CommandLine.GetArg("collector.logical_disk.volume-include").StringVar(&localVolumeInclude)
testutils.FuncBenchmarkCollector(b, "logical_disk", logical_disk.NewWithFlags) testutils.FuncBenchmarkCollector(b, "logical_disk", logical_disk.NewWithFlags)
} }
func TestCollector(t *testing.T) {
testutils.TestCollector(t, logical_disk.New, &logical_disk.Config{
VolumeInclude: types.RegExpAny,
})
}

View File

@@ -0,0 +1,94 @@
//go:build windows
package logon
import (
"fmt"
"log/slog"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/secur32"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Name is the registration name of this collector within windows_exporter.
const Name = "logon"

// Config holds the configuration for the logon collector.
// The collector currently has no configurable options.
type Config struct{}

// ConfigDefaults is the default (empty) configuration.
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for logon session metrics,
// gathered via the secur32 API (see collect); it does not query WMI.
type Collector struct {
	config Config

	// sessionInfo describes the per-session logon timestamp gauge,
	// labeled with id, username, domain and logon type.
	sessionInfo *prometheus.Desc
}
// New builds a logon Collector from the given configuration.
// A nil config selects ConfigDefaults.
func New(config *Config) *Collector {
	cfg := ConfigDefaults
	if config != nil {
		cfg = *config
	}

	return &Collector{config: cfg}
}
// NewWithFlags builds a logon Collector for kingpin-based construction.
// The collector exposes no flags, so this is equivalent to New with
// defaults. Routing through New (instead of returning a bare &Collector{})
// keeps construction consistent with the other collectors and stays
// correct if Config ever gains fields with non-zero defaults.
func NewWithFlags(_ *kingpin.Application) *Collector {
	return New(&ConfigDefaults)
}
// GetName returns the collector's registration name.
func (c *Collector) GetName() string { return Name }
// GetPerfCounter returns the perflib object names this collector depends
// on. The logon collector reads sessions via secur32, so none are needed;
// a non-nil empty slice is returned to match the original contract.
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	perfCounters := make([]string, 0)

	return perfCounters, nil
}
// Close releases collector resources; the logon collector holds none,
// so this is always a successful no-op.
func (c *Collector) Close(_ *slog.Logger) error { return nil }
// Build initializes the metric descriptors. The logger and MI session are
// unused; the method never fails.
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	fqName := prometheus.BuildFQName(types.Namespace, Name, "session_logon_timestamp_seconds")
	labels := []string{"id", "username", "domain", "type"}

	c.sessionInfo = prometheus.NewDesc(fqName, "timestamp of the logon session in seconds.", labels, nil)

	return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel. The scrape context and
// logger are unused; any error from collect is returned as-is, so the
// intermediate if/return-nil wrapper was redundant and has been removed.
func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
	return c.collect(ch)
}
// collect enumerates the active logon sessions via the secur32 API and
// emits one gauge per session carrying its logon time as a Unix timestamp,
// labeled with the session id, user name, domain and logon type.
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
	sessions, err := secur32.GetLogonSessions()
	if err != nil {
		return fmt.Errorf("failed to get logon sessions: %w", err)
	}

	for _, s := range sessions {
		ch <- prometheus.MustNewConstMetric(
			c.sessionInfo,
			prometheus.GaugeValue,
			float64(s.LogonTime.Unix()),
			s.LogonId.String(),
			s.UserName,
			s.LogonDomain,
			s.LogonType.String(),
		)
	}

	return nil
}

View File

@@ -0,0 +1,17 @@
package logon_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/logon"
"github.com/prometheus-community/windows_exporter/internal/testutils"
)
// BenchmarkCollector benchmarks a full build/collect cycle of the logon
// collector via the shared harness.
func BenchmarkCollector(b *testing.B) {
	// No perflib context name is required: the collector reads logon
	// sessions through the secur32 API, not through perf counters.
	testutils.FuncBenchmarkCollector(b, logon.Name, logon.NewWithFlags)
}
func TestCollector(t *testing.T) {
testutils.TestCollector(t, logon.New, nil)
}

View File

@@ -0,0 +1,75 @@
package memory
// PDH counter names for the Windows "Memory" performance object.
// These string values are consumed at runtime by the perfdata collector
// and must match the English counter display names exactly — do not edit.
const (
	availableBytes                  = "Available Bytes"
	availableKBytes                 = "Available KBytes"
	availableMBytes                 = "Available MBytes"
	cacheBytes                      = "Cache Bytes"
	cacheBytesPeak                  = "Cache Bytes Peak"
	cacheFaultsPerSec               = "Cache Faults/sec"
	commitLimit                     = "Commit Limit"
	committedBytes                  = "Committed Bytes"
	demandZeroFaultsPerSec          = "Demand Zero Faults/sec"
	freeAndZeroPageListBytes        = "Free & Zero Page List Bytes"
	freeSystemPageTableEntries      = "Free System Page Table Entries"
	modifiedPageListBytes           = "Modified Page List Bytes"
	pageFaultsPerSec                = "Page Faults/sec"
	pageReadsPerSec                 = "Page Reads/sec"
	pagesInputPerSec                = "Pages Input/sec"
	pagesOutputPerSec               = "Pages Output/sec"
	pagesPerSec                     = "Pages/sec"
	pageWritesPerSec                = "Page Writes/sec"
	poolNonpagedAllocs              = "Pool Nonpaged Allocs"
	poolNonpagedBytes               = "Pool Nonpaged Bytes"
	poolPagedAllocs                 = "Pool Paged Allocs"
	poolPagedBytes                  = "Pool Paged Bytes"
	poolPagedResidentBytes          = "Pool Paged Resident Bytes"
	standbyCacheCoreBytes           = "Standby Cache Core Bytes"
	standbyCacheNormalPriorityBytes = "Standby Cache Normal Priority Bytes"
	standbyCacheReserveBytes        = "Standby Cache Reserve Bytes"
	systemCacheResidentBytes        = "System Cache Resident Bytes"
	systemCodeResidentBytes         = "System Code Resident Bytes"
	systemCodeTotalBytes            = "System Code Total Bytes"
	systemDriverResidentBytes       = "System Driver Resident Bytes"
	systemDriverTotalBytes          = "System Driver Total Bytes"
	transitionFaultsPerSec          = "Transition Faults/sec"
	transitionPagesRePurposedPerSec = "Transition Pages RePurposed/sec"
	writeCopiesPerSec               = "Write Copies/sec"
)
// memory maps the perflib "Memory" object counters for v1 (perflib-based)
// collection. The perflib struct tags are read at runtime by the
// unmarshaller and must match the counter display names exactly — do not
// edit them.
type memory struct {
	AvailableBytes                  float64 `perflib:"Available Bytes"`
	AvailableKBytes                 float64 `perflib:"Available KBytes"`
	AvailableMBytes                 float64 `perflib:"Available MBytes"`
	CacheBytes                      float64 `perflib:"Cache Bytes"`
	CacheBytesPeak                  float64 `perflib:"Cache Bytes Peak"`
	CacheFaultsPerSec               float64 `perflib:"Cache Faults/sec"`
	CommitLimit                     float64 `perflib:"Commit Limit"`
	CommittedBytes                  float64 `perflib:"Committed Bytes"`
	DemandZeroFaultsPerSec          float64 `perflib:"Demand Zero Faults/sec"`
	FreeAndZeroPageListBytes        float64 `perflib:"Free & Zero Page List Bytes"`
	FreeSystemPageTableEntries      float64 `perflib:"Free System Page Table Entries"`
	ModifiedPageListBytes           float64 `perflib:"Modified Page List Bytes"`
	PageFaultsPerSec                float64 `perflib:"Page Faults/sec"`
	PageReadsPerSec                 float64 `perflib:"Page Reads/sec"`
	PagesInputPerSec                float64 `perflib:"Pages Input/sec"`
	PagesOutputPerSec               float64 `perflib:"Pages Output/sec"`
	PagesPerSec                     float64 `perflib:"Pages/sec"`
	PageWritesPerSec                float64 `perflib:"Page Writes/sec"`
	PoolNonpagedAllocs              float64 `perflib:"Pool Nonpaged Allocs"`
	PoolNonpagedBytes               float64 `perflib:"Pool Nonpaged Bytes"`
	PoolPagedAllocs                 float64 `perflib:"Pool Paged Allocs"`
	PoolPagedBytes                  float64 `perflib:"Pool Paged Bytes"`
	PoolPagedResidentBytes          float64 `perflib:"Pool Paged Resident Bytes"`
	StandbyCacheCoreBytes           float64 `perflib:"Standby Cache Core Bytes"`
	StandbyCacheNormalPriorityBytes float64 `perflib:"Standby Cache Normal Priority Bytes"`
	StandbyCacheReserveBytes        float64 `perflib:"Standby Cache Reserve Bytes"`
	SystemCacheResidentBytes        float64 `perflib:"System Cache Resident Bytes"`
	SystemCodeResidentBytes         float64 `perflib:"System Code Resident Bytes"`
	SystemCodeTotalBytes            float64 `perflib:"System Code Total Bytes"`
	SystemDriverResidentBytes       float64 `perflib:"System Driver Resident Bytes"`
	SystemDriverTotalBytes          float64 `perflib:"System Driver Total Bytes"`
	TransitionFaultsPerSec          float64 `perflib:"Transition Faults/sec"`
	TransitionPagesRePurposedPerSec float64 `perflib:"Transition Pages RePurposed/sec"`
	WriteCopiesPerSec               float64 `perflib:"Write Copies/sec"`
}

View File

@@ -11,11 +11,14 @@ import (
"log/slog" "log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/headers/sysinfoapi" "github.com/prometheus-community/windows_exporter/internal/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "memory" const Name = "memory"
@@ -28,6 +31,8 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
perfDataCollector perfdata.Collector
// Performance metrics // Performance metrics
availableBytes *prometheus.Desc availableBytes *prometheus.Desc
cacheBytes *prometheus.Desc cacheBytes *prometheus.Desc
@@ -89,6 +94,10 @@ func (c *Collector) GetName() string {
} }
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) { func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if utils.PDHEnabled() {
return []string{}, nil
}
return []string{"Memory"}, nil return []string{"Memory"}, nil
} }
@@ -96,7 +105,53 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if utils.PDHEnabled() {
counters := []string{
availableBytes,
availableKBytes,
availableMBytes,
cacheBytes,
cacheBytesPeak,
cacheFaultsPerSec,
commitLimit,
committedBytes,
demandZeroFaultsPerSec,
freeAndZeroPageListBytes,
freeSystemPageTableEntries,
modifiedPageListBytes,
pageFaultsPerSec,
pageReadsPerSec,
pagesInputPerSec,
pagesOutputPerSec,
pagesPerSec,
pageWritesPerSec,
poolNonpagedAllocs,
poolNonpagedBytes,
poolPagedAllocs,
poolPagedBytes,
poolPagedResidentBytes,
standbyCacheCoreBytes,
standbyCacheNormalPriorityBytes,
standbyCacheReserveBytes,
systemCacheResidentBytes,
systemCodeResidentBytes,
systemCodeTotalBytes,
systemDriverResidentBytes,
systemDriverTotalBytes,
transitionFaultsPerSec,
transitionPagesRePurposedPerSec,
writeCopiesPerSec,
}
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Memory", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
}
}
c.availableBytes = prometheus.NewDesc( c.availableBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "available_bytes"), prometheus.BuildFQName(types.Namespace, Name, "available_bytes"),
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+ "The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
@@ -170,31 +225,31 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
) )
c.swapPageReadsTotal = prometheus.NewDesc( c.swapPageReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_page_reads_total"), prometheus.BuildFQName(types.Namespace, Name, "swap_page_reads_total"),
"Number of disk page reads (a single read operation reading several pages is still only counted once) (PageReadsPersec)", "Number of disk page reads (a single read operation reading several pages is still only counted once) (PageReadsPerSec)",
nil, nil,
nil, nil,
) )
c.swapPagesReadTotal = prometheus.NewDesc( c.swapPagesReadTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_pages_read_total"), prometheus.BuildFQName(types.Namespace, Name, "swap_pages_read_total"),
"Number of pages read across all page reads (ie counting all pages read even if they are read in a single operation) (PagesInputPersec)", "Number of pages read across all page reads (ie counting all pages read even if they are read in a single operation) (PagesInputPerSec)",
nil, nil,
nil, nil,
) )
c.swapPagesWrittenTotal = prometheus.NewDesc( c.swapPagesWrittenTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_pages_written_total"), prometheus.BuildFQName(types.Namespace, Name, "swap_pages_written_total"),
"Number of pages written across all page writes (ie counting all pages written even if they are written in a single operation) (PagesOutputPersec)", "Number of pages written across all page writes (ie counting all pages written even if they are written in a single operation) (PagesOutputPerSec)",
nil, nil,
nil, nil,
) )
c.swapPageOperationsTotal = prometheus.NewDesc( c.swapPageOperationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_page_operations_total"), prometheus.BuildFQName(types.Namespace, Name, "swap_page_operations_total"),
"Total number of swap page read and writes (PagesPersec)", "Total number of swap page read and writes (PagesPerSec)",
nil, nil,
nil, nil,
) )
c.swapPageWritesTotal = prometheus.NewDesc( c.swapPageWritesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_page_writes_total"), prometheus.BuildFQName(types.Namespace, Name, "swap_page_writes_total"),
"Number of disk page writes (a single write operation writing several pages is still only counted once) (PageWritesPersec)", "Number of disk page writes (a single write operation writing several pages is still only counted once) (PageWritesPerSec)",
nil, nil,
nil, nil,
) )
@@ -285,19 +340,19 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
c.transitionFaultsTotal = prometheus.NewDesc( c.transitionFaultsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transition_faults_total"), prometheus.BuildFQName(types.Namespace, Name, "transition_faults_total"),
"Number of faults rate at which page faults are resolved by recovering pages that were being used by another process sharing the page, or were on the "+ "Number of faults rate at which page faults are resolved by recovering pages that were being used by another process sharing the page, or were on the "+
"modified page list or the standby list, or were being written to disk at the time of the page fault (TransitionFaultsPersec)", "modified page list or the standby list, or were being written to disk at the time of the page fault (TransitionFaultsPerSec)",
nil, nil,
nil, nil,
) )
c.transitionPagesRepurposedTotal = prometheus.NewDesc( c.transitionPagesRepurposedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transition_pages_repurposed_total"), prometheus.BuildFQName(types.Namespace, Name, "transition_pages_repurposed_total"),
"Transition Pages RePurposed is the rate at which the number of transition cache pages were reused for a different purpose (TransitionPagesRePurposedPersec)", "Transition Pages RePurposed is the rate at which the number of transition cache pages were reused for a different purpose (TransitionPagesRePurposedPerSec)",
nil, nil,
nil, nil,
) )
c.writeCopiesTotal = prometheus.NewDesc( c.writeCopiesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_copies_total"), prometheus.BuildFQName(types.Namespace, Name, "write_copies_total"),
"The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory (WriteCopiesPersec)", "The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory (WriteCopiesPerSec)",
nil, nil,
nil, nil,
) )
@@ -330,7 +385,14 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
errs := make([]error, 0, 2) errs := make([]error, 0, 2)
if err := c.collectPerformanceData(ctx, logger, ch); err != nil { var err error
if utils.PDHEnabled() {
err = c.collectPDH(ch)
} else {
err = c.collectPerformanceData(ctx, logger, ch)
}
if err != nil {
logger.Error("failed collecting memory metrics", logger.Error("failed collecting memory metrics",
slog.Any("err", err), slog.Any("err", err),
) )
@@ -376,49 +438,12 @@ func (c *Collector) collectGlobalMemoryStatus(ch chan<- prometheus.Metric) error
return nil return nil
} }
type memory struct {
AvailableBytes float64 `perflib:"Available Bytes"`
AvailableKBytes float64 `perflib:"Available KBytes"`
AvailableMBytes float64 `perflib:"Available MBytes"`
CacheBytes float64 `perflib:"Cache Bytes"`
CacheBytesPeak float64 `perflib:"Cache Bytes Peak"`
CacheFaultsPersec float64 `perflib:"Cache Faults/sec"`
CommitLimit float64 `perflib:"Commit Limit"`
CommittedBytes float64 `perflib:"Committed Bytes"`
DemandZeroFaultsPersec float64 `perflib:"Demand Zero Faults/sec"`
FreeAndZeroPageListBytes float64 `perflib:"Free & Zero Page List Bytes"`
FreeSystemPageTableEntries float64 `perflib:"Free System Page Table Entries"`
ModifiedPageListBytes float64 `perflib:"Modified Page List Bytes"`
PageFaultsPersec float64 `perflib:"Page Faults/sec"`
PageReadsPersec float64 `perflib:"Page Reads/sec"`
PagesInputPersec float64 `perflib:"Pages Input/sec"`
PagesOutputPersec float64 `perflib:"Pages Output/sec"`
PagesPersec float64 `perflib:"Pages/sec"`
PageWritesPersec float64 `perflib:"Page Writes/sec"`
PoolNonpagedAllocs float64 `perflib:"Pool Nonpaged Allocs"`
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
PoolPagedAllocs float64 `perflib:"Pool Paged Allocs"`
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
PoolPagedResidentBytes float64 `perflib:"Pool Paged Resident Bytes"`
StandbyCacheCoreBytes float64 `perflib:"Standby Cache Core Bytes"`
StandbyCacheNormalPriorityBytes float64 `perflib:"Standby Cache Normal Priority Bytes"`
StandbyCacheReserveBytes float64 `perflib:"Standby Cache Reserve Bytes"`
SystemCacheResidentBytes float64 `perflib:"System Cache Resident Bytes"`
SystemCodeResidentBytes float64 `perflib:"System Code Resident Bytes"`
SystemCodeTotalBytes float64 `perflib:"System Code Total Bytes"`
SystemDriverResidentBytes float64 `perflib:"System Driver Resident Bytes"`
SystemDriverTotalBytes float64 `perflib:"System Driver Total Bytes"`
TransitionFaultsPersec float64 `perflib:"Transition Faults/sec"`
TransitionPagesRePurposedPersec float64 `perflib:"Transition Pages RePurposed/sec"`
WriteCopiesPersec float64 `perflib:"Write Copies/sec"`
}
func (c *Collector) collectPerformanceData(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) collectPerformanceData(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
var dst []memory var dst []memory
if err := perflib.UnmarshalObject(ctx.PerfObjects["Memory"], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects["Memory"], &dst, logger); err != nil {
return err return err
} }
@@ -443,7 +468,7 @@ func (c *Collector) collectPerformanceData(ctx *types.ScrapeContext, logger *slo
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.cacheFaultsTotal, c.cacheFaultsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].CacheFaultsPersec, dst[0].CacheFaultsPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
@@ -461,7 +486,7 @@ func (c *Collector) collectPerformanceData(ctx *types.ScrapeContext, logger *slo
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.demandZeroFaultsTotal, c.demandZeroFaultsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].DemandZeroFaultsPersec, dst[0].DemandZeroFaultsPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
@@ -485,37 +510,37 @@ func (c *Collector) collectPerformanceData(ctx *types.ScrapeContext, logger *slo
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.pageFaultsTotal, c.pageFaultsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].PageFaultsPersec, dst[0].PageFaultsPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.swapPageReadsTotal, c.swapPageReadsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].PageReadsPersec, dst[0].PageReadsPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.swapPagesReadTotal, c.swapPagesReadTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].PagesInputPersec, dst[0].PagesInputPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.swapPagesWrittenTotal, c.swapPagesWrittenTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].PagesOutputPersec, dst[0].PagesOutputPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.swapPageOperationsTotal, c.swapPageOperationsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].PagesPersec, dst[0].PagesPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.swapPageWritesTotal, c.swapPageWritesTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].PageWritesPersec, dst[0].PageWritesPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
@@ -599,19 +624,226 @@ func (c *Collector) collectPerformanceData(ctx *types.ScrapeContext, logger *slo
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.transitionFaultsTotal, c.transitionFaultsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].TransitionFaultsPersec, dst[0].TransitionFaultsPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.transitionPagesRepurposedTotal, c.transitionPagesRepurposedTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].TransitionPagesRePurposedPersec, dst[0].TransitionPagesRePurposedPerSec,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.writeCopiesTotal, c.writeCopiesTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].WriteCopiesPersec, dst[0].WriteCopiesPerSec,
)
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect Memory metrics: %w", err)
}
data, ok := perfData[perftypes.EmptyInstance]
if !ok {
return errors.New("perflib query for Memory returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.availableBytes,
prometheus.GaugeValue,
data[availableBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.cacheBytes,
prometheus.GaugeValue,
data[cacheBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.cacheBytesPeak,
prometheus.GaugeValue,
data[cacheBytesPeak].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.cacheFaultsTotal,
prometheus.CounterValue,
data[cacheFaultsPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.commitLimit,
prometheus.GaugeValue,
data[commitLimit].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.committedBytes,
prometheus.GaugeValue,
data[committedBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.demandZeroFaultsTotal,
prometheus.CounterValue,
data[demandZeroFaultsPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.freeAndZeroPageListBytes,
prometheus.GaugeValue,
data[freeAndZeroPageListBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.freeSystemPageTableEntries,
prometheus.GaugeValue,
data[freeSystemPageTableEntries].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.modifiedPageListBytes,
prometheus.GaugeValue,
data[modifiedPageListBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.pageFaultsTotal,
prometheus.CounterValue,
data[pageFaultsPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.swapPageReadsTotal,
prometheus.CounterValue,
data[pageReadsPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.swapPagesReadTotal,
prometheus.CounterValue,
data[pagesInputPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.swapPagesWrittenTotal,
prometheus.CounterValue,
data[pagesOutputPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.swapPageOperationsTotal,
prometheus.CounterValue,
data[pagesPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.swapPageWritesTotal,
prometheus.CounterValue,
data[pageWritesPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.poolNonPagedAllocationsTotal,
prometheus.GaugeValue,
data[poolNonpagedAllocs].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.poolNonPagedBytes,
prometheus.GaugeValue,
data[poolNonpagedBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedAllocationsTotal,
prometheus.CounterValue,
data[poolPagedAllocs].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedBytes,
prometheus.GaugeValue,
data[poolPagedBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedResidentBytes,
prometheus.GaugeValue,
data[poolPagedResidentBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.standbyCacheCoreBytes,
prometheus.GaugeValue,
data[standbyCacheCoreBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.standbyCacheNormalPriorityBytes,
prometheus.GaugeValue,
data[standbyCacheNormalPriorityBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.standbyCacheReserveBytes,
prometheus.GaugeValue,
data[standbyCacheReserveBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.systemCacheResidentBytes,
prometheus.GaugeValue,
data[systemCacheResidentBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.systemCodeResidentBytes,
prometheus.GaugeValue,
data[systemCodeResidentBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.systemCodeTotalBytes,
prometheus.GaugeValue,
data[systemCodeTotalBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.systemDriverResidentBytes,
prometheus.GaugeValue,
data[systemDriverResidentBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.systemDriverTotalBytes,
prometheus.GaugeValue,
data[systemDriverTotalBytes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.transitionFaultsTotal,
prometheus.CounterValue,
data[transitionFaultsPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.transitionPagesRepurposedTotal,
prometheus.CounterValue,
data[transitionPagesRePurposedPerSec].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.writeCopiesTotal,
prometheus.CounterValue,
data[writeCopiesPerSec].FirstValue,
) )
return nil return nil

View File

@@ -0,0 +1,16 @@
package memory_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/memory"
"github.com/prometheus-community/windows_exporter/internal/testutils"
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, memory.Name, memory.NewWithFlags)
}
func TestCollector(t *testing.T) {
testutils.TestCollector(t, memory.New, nil)
}

View File

@@ -8,9 +8,9 @@ import (
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "mscluster" const Name = "mscluster"
@@ -32,7 +32,7 @@ var ConfigDefaults = Config{
// A Collector is a Prometheus Collector for WMI MSCluster_Cluster metrics. // A Collector is a Prometheus Collector for WMI MSCluster_Cluster metrics.
type Collector struct { type Collector struct {
config Config config Config
wmiClient *wmi.Client miSession *mi.Session
// cluster // cluster
clusterAddEvictDelay *prometheus.Desc clusterAddEvictDelay *prometheus.Desc
@@ -196,7 +196,7 @@ func NewWithFlags(app *kingpin.Application) *Collector {
var collectorsEnabled string var collectorsEnabled string
app.Flag( app.Flag(
"collectors.mscluster.enabled", "collector.mscluster.enabled",
"Comma-separated list of collectors to use.", "Comma-separated list of collectors to use.",
).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled) ).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
@@ -221,16 +221,16 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if len(c.config.CollectorsEnabled) == 0 { if len(c.config.CollectorsEnabled) == 0 {
return nil return nil
} }
if wmiClient == nil || wmiClient.SWbemServicesClient == nil { if miSession == nil {
return errors.New("wmiClient or SWbemServicesClient is nil") return errors.New("miSession is nil")
} }
c.wmiClient = wmiClient c.miSession = miSession
if slices.Contains(c.config.CollectorsEnabled, "cluster") { if slices.Contains(c.config.CollectorsEnabled, "cluster") {
c.buildCluster() c.buildCluster()

View File

@@ -1,7 +1,11 @@
package mscluster package mscluster
import ( import (
"github.com/prometheus-community/windows_exporter/pkg/types" "fmt"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@@ -10,85 +14,85 @@ const nameCluster = Name + "_cluster"
// msClusterCluster represents the MSCluster_Cluster WMI class // msClusterCluster represents the MSCluster_Cluster WMI class
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-cluster // - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-cluster
type msClusterCluster struct { type msClusterCluster struct {
Name string Name string `mi:"Name"`
AddEvictDelay uint AddEvictDelay uint `mi:"AddEvictDelay"`
AdminAccessPoint uint AdminAccessPoint uint `mi:"AdminAccessPoint"`
AutoAssignNodeSite uint AutoAssignNodeSite uint `mi:"AutoAssignNodeSite"`
AutoBalancerLevel uint AutoBalancerLevel uint `mi:"AutoBalancerLevel"`
AutoBalancerMode uint AutoBalancerMode uint `mi:"AutoBalancerMode"`
BackupInProgress uint BackupInProgress uint `mi:"BackupInProgress"`
BlockCacheSize uint BlockCacheSize uint `mi:"BlockCacheSize"`
ClusSvcHangTimeout uint ClusSvcHangTimeout uint `mi:"ClusSvcHangTimeout"`
ClusSvcRegroupOpeningTimeout uint ClusSvcRegroupOpeningTimeout uint `mi:"ClusSvcRegroupOpeningTimeout"`
ClusSvcRegroupPruningTimeout uint ClusSvcRegroupPruningTimeout uint `mi:"ClusSvcRegroupPruningTimeout"`
ClusSvcRegroupStageTimeout uint ClusSvcRegroupStageTimeout uint `mi:"ClusSvcRegroupStageTimeout"`
ClusSvcRegroupTickInMilliseconds uint ClusSvcRegroupTickInMilliseconds uint `mi:"ClusSvcRegroupTickInMilliseconds"`
ClusterEnforcedAntiAffinity uint ClusterEnforcedAntiAffinity uint `mi:"ClusterEnforcedAntiAffinity"`
ClusterFunctionalLevel uint ClusterFunctionalLevel uint `mi:"ClusterFunctionalLevel"`
ClusterGroupWaitDelay uint ClusterGroupWaitDelay uint `mi:"ClusterGroupWaitDelay"`
ClusterLogLevel uint ClusterLogLevel uint `mi:"ClusterLogLevel"`
ClusterLogSize uint ClusterLogSize uint `mi:"ClusterLogSize"`
ClusterUpgradeVersion uint ClusterUpgradeVersion uint `mi:"ClusterUpgradeVersion"`
CrossSiteDelay uint CrossSiteDelay uint `mi:"CrossSiteDelay"`
CrossSiteThreshold uint CrossSiteThreshold uint `mi:"CrossSiteThreshold"`
CrossSubnetDelay uint CrossSubnetDelay uint `mi:"CrossSubnetDelay"`
CrossSubnetThreshold uint CrossSubnetThreshold uint `mi:"CrossSubnetThreshold"`
CsvBalancer uint CsvBalancer uint `mi:"CsvBalancer"`
DatabaseReadWriteMode uint DatabaseReadWriteMode uint `mi:"DatabaseReadWriteMode"`
DefaultNetworkRole uint DefaultNetworkRole uint `mi:"DefaultNetworkRole"`
DetectedCloudPlatform uint DetectedCloudPlatform uint `mi:"DetectedCloudPlatform"`
DetectManagedEvents uint DetectManagedEvents uint `mi:"DetectManagedEvents"`
DetectManagedEventsThreshold uint DetectManagedEventsThreshold uint `mi:"DetectManagedEventsThreshold"`
DisableGroupPreferredOwnerRandomization uint DisableGroupPreferredOwnerRandomization uint `mi:"DisableGroupPreferredOwnerRandomization"`
DrainOnShutdown uint DrainOnShutdown uint `mi:"DrainOnShutdown"`
DynamicQuorumEnabled uint DynamicQuorumEnabled uint `mi:"DynamicQuorumEnabled"`
EnableSharedVolumes uint EnableSharedVolumes uint `mi:"EnableSharedVolumes"`
FixQuorum uint FixQuorum uint `mi:"FixQuorum"`
GracePeriodEnabled uint GracePeriodEnabled uint `mi:"GracePeriodEnabled"`
GracePeriodTimeout uint GracePeriodTimeout uint `mi:"GracePeriodTimeout"`
GroupDependencyTimeout uint GroupDependencyTimeout uint `mi:"GroupDependencyTimeout"`
HangRecoveryAction uint HangRecoveryAction uint `mi:"HangRecoveryAction"`
IgnorePersistentStateOnStartup uint IgnorePersistentStateOnStartup uint `mi:"IgnorePersistentStateOnStartup"`
LogResourceControls uint LogResourceControls uint `mi:"LogResourceControls"`
LowerQuorumPriorityNodeId uint LowerQuorumPriorityNodeId uint `mi:"LowerQuorumPriorityNodeId"`
MaxNumberOfNodes uint MaxNumberOfNodes uint `mi:"MaxNumberOfNodes"`
MessageBufferLength uint MessageBufferLength uint `mi:"MessageBufferLength"`
MinimumNeverPreemptPriority uint MinimumNeverPreemptPriority uint `mi:"MinimumNeverPreemptPriority"`
MinimumPreemptorPriority uint MinimumPreemptorPriority uint `mi:"MinimumPreemptorPriority"`
NetftIPSecEnabled uint NetftIPSecEnabled uint `mi:"NetftIPSecEnabled"`
PlacementOptions uint PlacementOptions uint `mi:"PlacementOptions"`
PlumbAllCrossSubnetRoutes uint PlumbAllCrossSubnetRoutes uint `mi:"PlumbAllCrossSubnetRoutes"`
PreventQuorum uint PreventQuorum uint `mi:"PreventQuorum"`
QuarantineDuration uint QuarantineDuration uint `mi:"QuarantineDuration"`
QuarantineThreshold uint QuarantineThreshold uint `mi:"QuarantineThreshold"`
QuorumArbitrationTimeMax uint QuorumArbitrationTimeMax uint `mi:"QuorumArbitrationTimeMax"`
QuorumArbitrationTimeMin uint QuorumArbitrationTimeMin uint `mi:"QuorumArbitrationTimeMin"`
QuorumLogFileSize uint QuorumLogFileSize uint `mi:"QuorumLogFileSize"`
QuorumTypeValue uint QuorumTypeValue uint `mi:"QuorumTypeValue"`
RequestReplyTimeout uint RequestReplyTimeout uint `mi:"RequestReplyTimeout"`
ResiliencyDefaultPeriod uint ResiliencyDefaultPeriod uint `mi:"ResiliencyDefaultPeriod"`
ResiliencyLevel uint ResiliencyLevel uint `mi:"ResiliencyLevel"`
ResourceDllDeadlockPeriod uint ResourceDllDeadlockPeriod uint `mi:"ResourceDllDeadlockPeriod"`
RootMemoryReserved uint RootMemoryReserved uint `mi:"RootMemoryReserved"`
RouteHistoryLength uint RouteHistoryLength uint `mi:"RouteHistoryLength"`
S2DBusTypes uint S2DBusTypes uint `mi:"S2DBusTypes"`
S2DCacheDesiredState uint S2DCacheDesiredState uint `mi:"S2DCacheDesiredState"`
S2DCacheFlashReservePercent uint S2DCacheFlashReservePercent uint `mi:"S2DCacheFlashReservePercent"`
S2DCachePageSizeKBytes uint S2DCachePageSizeKBytes uint `mi:"S2DCachePageSizeKBytes"`
S2DEnabled uint S2DEnabled uint `mi:"S2DEnabled"`
S2DIOLatencyThreshold uint S2DIOLatencyThreshold uint `mi:"S2DIOLatencyThreshold"`
S2DOptimizations uint S2DOptimizations uint `mi:"S2DOptimizations"`
SameSubnetDelay uint SameSubnetDelay uint `mi:"SameSubnetDelay"`
SameSubnetThreshold uint SameSubnetThreshold uint `mi:"SameSubnetThreshold"`
SecurityLevel uint SecurityLevel uint `mi:"SecurityLevel"`
SecurityLevelForStorage uint SecurityLevelForStorage uint `mi:"SecurityLevelForStorage"`
SharedVolumeVssWriterOperationTimeout uint SharedVolumeVssWriterOperationTimeout uint `mi:"SharedVolumeVssWriterOperationTimeout"`
ShutdownTimeoutInMinutes uint ShutdownTimeoutInMinutes uint `mi:"ShutdownTimeoutInMinutes"`
UseClientAccessNetworksForSharedVolumes uint UseClientAccessNetworksForSharedVolumes uint `mi:"UseClientAccessNetworksForSharedVolumes"`
WitnessDatabaseWriteTimeout uint WitnessDatabaseWriteTimeout uint `mi:"WitnessDatabaseWriteTimeout"`
WitnessDynamicWeight uint WitnessDynamicWeight uint `mi:"WitnessDynamicWeight"`
WitnessRestartInterval uint WitnessRestartInterval uint `mi:"WitnessRestartInterval"`
} }
func (c *Collector) buildCluster() { func (c *Collector) buildCluster() {
@@ -558,8 +562,8 @@ func (c *Collector) buildCluster() {
func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error { func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error {
var dst []msClusterCluster var dst []msClusterCluster
if err := c.wmiClient.Query("SELECT * FROM MSCluster_Cluster", &dst, nil, "root/MSCluster"); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, utils.Must(mi.NewQuery("SELECT * MSCluster_Cluster"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, v := range dst { for _, v := range dst {

View File

@@ -1,7 +1,11 @@
package mscluster package mscluster
import ( import (
"github.com/prometheus-community/windows_exporter/pkg/types" "fmt"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@@ -10,13 +14,13 @@ const nameNetwork = Name + "_network"
// msClusterNetwork represents the MSCluster_Network WMI class // msClusterNetwork represents the MSCluster_Network WMI class
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-network // - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-network
type msClusterNetwork struct { type msClusterNetwork struct {
Name string Name string `mi:"Name"`
Characteristics uint Characteristics uint `mi:"Characteristics"`
Flags uint Flags uint `mi:"Flags"`
Metric uint Metric uint `mi:"Metric"`
Role uint Role uint `mi:"Role"`
State uint State uint `mi:"State"`
} }
func (c *Collector) buildNetwork() { func (c *Collector) buildNetwork() {
@@ -57,8 +61,8 @@ func (c *Collector) buildNetwork() {
func (c *Collector) collectNetwork(ch chan<- prometheus.Metric) error { func (c *Collector) collectNetwork(ch chan<- prometheus.Metric) error {
var dst []msClusterNetwork var dst []msClusterNetwork
if err := c.wmiClient.Query("SELECT * FROM MSCluster_Network", &dst, nil, "root/MSCluster"); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, utils.Must(mi.NewQuery("SELECT * MSCluster_Node"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, v := range dst { for _, v := range dst {

View File

@@ -1,7 +1,11 @@
package mscluster package mscluster
import ( import (
"github.com/prometheus-community/windows_exporter/pkg/types" "fmt"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@@ -10,22 +14,22 @@ const nameNode = Name + "_node"
// msClusterNode represents the MSCluster_Node WMI class // msClusterNode represents the MSCluster_Node WMI class
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-node // - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-node
type msClusterNode struct { type msClusterNode struct {
Name string Name string `mi:"Name"`
BuildNumber uint BuildNumber uint `mi:"BuildNumber"`
Characteristics uint Characteristics uint `mi:"Characteristics"`
DetectedCloudPlatform uint DetectedCloudPlatform uint `mi:"DetectedCloudPlatform"`
DynamicWeight uint DynamicWeight uint `mi:"DynamicWeight"`
Flags uint Flags uint `mi:"Flags"`
MajorVersion uint MajorVersion uint `mi:"MajorVersion"`
MinorVersion uint MinorVersion uint `mi:"MinorVersion"`
NeedsPreventQuorum uint NeedsPreventQuorum uint `mi:"NeedsPreventQuorum"`
NodeDrainStatus uint NodeDrainStatus uint `mi:"NodeDrainStatus"`
NodeHighestVersion uint NodeHighestVersion uint `mi:"NodeHighestVersion"`
NodeLowestVersion uint NodeLowestVersion uint `mi:"NodeLowestVersion"`
NodeWeight uint NodeWeight uint `mi:"NodeWeight"`
State uint State uint `mi:"State"`
StatusInformation uint StatusInformation uint `mi:"StatusInformation"`
} }
func (c *Collector) buildNode() { func (c *Collector) buildNode() {
@@ -120,8 +124,8 @@ func (c *Collector) buildNode() {
func (c *Collector) collectNode(ch chan<- prometheus.Metric) ([]string, error) { func (c *Collector) collectNode(ch chan<- prometheus.Metric) ([]string, error) {
var dst []msClusterNode var dst []msClusterNode
if err := c.wmiClient.Query("SELECT * FROM MSCluster_Node", &dst, nil, "root/MSCluster"); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, utils.Must(mi.NewQuery("SELECT * FROM MSCluster_Node"))); err != nil {
return nil, err return nil, fmt.Errorf("WMI query failed: %w", err)
} }
nodeNames := make([]string, 0, len(dst)) nodeNames := make([]string, 0, len(dst))

View File

@@ -1,7 +1,11 @@
package mscluster package mscluster
import ( import (
"github.com/prometheus-community/windows_exporter/pkg/types" "fmt"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@@ -10,27 +14,27 @@ const nameResource = Name + "_resource"
// msClusterResource represents the MSCluster_Resource WMI class // msClusterResource represents the MSCluster_Resource WMI class
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resource // - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resource
type msClusterResource struct { type msClusterResource struct {
Name string Name string `mi:"Name"`
Type string Type string `mi:"Type"`
OwnerGroup string OwnerGroup string `mi:"OwnerGroup"`
OwnerNode string OwnerNode string `mi:"OwnerNode"`
Characteristics uint Characteristics uint `mi:"Characteristics"`
DeadlockTimeout uint DeadlockTimeout uint `mi:"DeadlockTimeout"`
EmbeddedFailureAction uint EmbeddedFailureAction uint `mi:"EmbeddedFailureAction"`
Flags uint Flags uint `mi:"Flags"`
IsAlivePollInterval uint IsAlivePollInterval uint `mi:"IsAlivePollInterval"`
LooksAlivePollInterval uint LooksAlivePollInterval uint `mi:"LooksAlivePollInterval"`
MonitorProcessId uint MonitorProcessId uint `mi:"MonitorProcessId"`
PendingTimeout uint PendingTimeout uint `mi:"PendingTimeout"`
ResourceClass uint ResourceClass uint `mi:"ResourceClass"`
RestartAction uint RestartAction uint `mi:"RestartAction"`
RestartDelay uint RestartDelay uint `mi:"RestartDelay"`
RestartPeriod uint RestartPeriod uint `mi:"RestartPeriod"`
RestartThreshold uint RestartThreshold uint `mi:"RestartThreshold"`
RetryPeriodOnFailure uint RetryPeriodOnFailure uint `mi:"RetryPeriodOnFailure"`
State uint State uint `mi:"State"`
Subclass uint Subclass uint `mi:"Subclass"`
} }
func (c *Collector) buildResource() { func (c *Collector) buildResource() {
@@ -149,8 +153,8 @@ func (c *Collector) buildResource() {
func (c *Collector) collectResource(ch chan<- prometheus.Metric, nodeNames []string) error { func (c *Collector) collectResource(ch chan<- prometheus.Metric, nodeNames []string) error {
var dst []msClusterResource var dst []msClusterResource
if err := c.wmiClient.Query("SELECT * FROM MSCluster_Resource", &dst, nil, "root/MSCluster"); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, utils.Must(mi.NewQuery("SELECT * FROM MSCluster_Resource"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, v := range dst { for _, v := range dst {

View File

@@ -1,7 +1,11 @@
package mscluster package mscluster
import ( import (
"github.com/prometheus-community/windows_exporter/pkg/types" "fmt"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@@ -10,22 +14,22 @@ const nameResourceGroup = Name + "_resourcegroup"
// msClusterResourceGroup represents the MSCluster_ResourceGroup WMI class // msClusterResourceGroup represents the MSCluster_ResourceGroup WMI class
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resourcegroup // - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resourcegroup
type msClusterResourceGroup struct { type msClusterResourceGroup struct {
Name string Name string `mi:"Name"`
AutoFailbackType uint AutoFailbackType uint `mi:"AutoFailbackType"`
Characteristics uint Characteristics uint `mi:"Characteristics"`
ColdStartSetting uint ColdStartSetting uint `mi:"ColdStartSetting"`
DefaultOwner uint DefaultOwner uint `mi:"DefaultOwner"`
FailbackWindowEnd int FailbackWindowEnd int `mi:"FailbackWindowEnd"`
FailbackWindowStart int FailbackWindowStart int `mi:"FailbackWindowStart"`
FailoverPeriod uint FailoverPeriod uint `mi:"FailoverPeriod"`
FailoverThreshold uint FailoverThreshold uint `mi:"FailoverThreshold"`
Flags uint Flags uint `mi:"Flags"`
GroupType uint GroupType uint `mi:"GroupType"`
OwnerNode string OwnerNode string `mi:"OwnerNode"`
Priority uint Priority uint `mi:"Priority"`
ResiliencyPeriod uint ResiliencyPeriod uint `mi:"ResiliencyPeriod"`
State uint State uint `mi:"State"`
} }
func (c *Collector) buildResourceGroup() { func (c *Collector) buildResourceGroup() {
@@ -126,8 +130,8 @@ func (c *Collector) buildResourceGroup() {
func (c *Collector) collectResourceGroup(ch chan<- prometheus.Metric, nodeNames []string) error { func (c *Collector) collectResourceGroup(ch chan<- prometheus.Metric, nodeNames []string) error {
var dst []msClusterResourceGroup var dst []msClusterResourceGroup
if err := c.wmiClient.Query("SELECT * FROM MSCluster_ResourceGroup", &dst, nil, "root/MSCluster"); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, utils.Must(mi.NewQuery("SELECT * FROM MSCluster_ResourceGroup"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, v := range dst { for _, v := range dst {

View File

@@ -4,14 +4,15 @@ package msmq
import ( import (
"errors" "errors"
"fmt"
"log/slog" "log/slog"
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "msmq" const Name = "msmq"
@@ -27,7 +28,7 @@ var ConfigDefaults = Config{
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics.
type Collector struct { type Collector struct {
config Config config Config
wmiClient *wmi.Client miSession *mi.Session
bytesInJournalQueue *prometheus.Desc bytesInJournalQueue *prometheus.Desc
bytesInQueue *prometheus.Desc bytesInQueue *prometheus.Desc
@@ -75,14 +76,14 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(logger *slog.Logger, wmiClient *wmi.Client) error { func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
if wmiClient == nil || wmiClient.SWbemServicesClient == nil { if miSession == nil {
return errors.New("wmiClient or SWbemServicesClient is nil") return errors.New("miSession is nil")
} }
c.wmiClient = wmiClient c.miSession = miSession
if *c.config.QueryWhereClause == "" { if *c.config.QueryWhereClause == "" {
logger.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!") logger.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
@@ -132,12 +133,12 @@ func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan
} }
type msmqQueue struct { type msmqQueue struct {
Name string Name string `mi:"Name"`
BytesInJournalQueue uint64 BytesInJournalQueue uint64 `mi:"BytesInJournalQueue"`
BytesInQueue uint64 BytesInQueue uint64 `mi:"BytesInQueue"`
MessagesInJournalQueue uint64 MessagesInJournalQueue uint64 `mi:"MessagesInJournalQueue"`
MessagesInQueue uint64 MessagesInQueue uint64 `mi:"MessagesInQueue"`
} }
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
@@ -148,8 +149,13 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
query += " WHERE " + *c.config.QueryWhereClause query += " WHERE " + *c.config.QueryWhereClause
} }
if err := c.wmiClient.Query(query, &dst); err != nil { queryExpression, err := mi.NewQuery(query)
return err if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, queryExpression); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
} }
for _, msmq := range dst { for _, msmq := range dst {

View File

@@ -3,8 +3,8 @@ package msmq_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/msmq" "github.com/prometheus-community/windows_exporter/internal/collector/msmq"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -13,10 +13,10 @@ import (
"time" "time"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/pkg/types" v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows/registry" "golang.org/x/sys/windows/registry"
) )
@@ -452,12 +452,12 @@ func NewWithFlags(app *kingpin.Application) *Collector {
var collectorsEnabled string var collectorsEnabled string
app.Flag( app.Flag(
"collectors.mssql.class-print", "collector.mssql.class-print",
"If true, print available mssql WMI classes and exit. Only displays if the mssql collector is enabled.", "If true, print available mssql WMI classes and exit. Only displays if the mssql collector is enabled.",
).BoolVar(&listAllCollectors) ).BoolVar(&listAllCollectors)
app.Flag( app.Flag(
"collectors.mssql.classes-enabled", "collector.mssql.classes-enabled",
"Comma-separated list of mssql WMI classes to use.", "Comma-separated list of mssql WMI classes to use.",
).Default(strings.Join(c.config.CollectorsEnabled, ",")).StringVar(&collectorsEnabled) ).Default(strings.Join(c.config.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
@@ -508,7 +508,7 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Result must order, to prevent test failures. // Result must order, to prevent test failures.
sort.Strings(c.config.CollectorsEnabled) sort.Strings(c.config.CollectorsEnabled)
@@ -2104,7 +2104,7 @@ func (c *Collector) collectAccessMethods(ctx *types.ScrapeContext, logger *slog.
logger.Debug(fmt.Sprintf("mssql_accessmethods collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_accessmethods collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "accessmethods")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "accessmethods")], &dst, logger); err != nil {
return err return err
} }
@@ -2441,7 +2441,7 @@ func (c *Collector) collectAvailabilityReplica(ctx *types.ScrapeContext, logger
logger.Debug(fmt.Sprintf("mssql_availreplica collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_availreplica collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "availreplica")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "availreplica")], &dst, logger); err != nil {
return err return err
} }
@@ -2552,7 +2552,7 @@ func (c *Collector) collectBufferManager(ctx *types.ScrapeContext, logger *slog.
logger.Debug(fmt.Sprintf("mssql_bufman collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_bufman collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "bufman")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "bufman")], &dst, logger); err != nil {
return err return err
} }
@@ -2757,7 +2757,7 @@ func (c *Collector) collectDatabaseReplica(ctx *types.ScrapeContext, logger *slo
logger.Debug(fmt.Sprintf("mssql_dbreplica collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_dbreplica collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "dbreplica")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "dbreplica")], &dst, logger); err != nil {
return err return err
} }
@@ -2999,7 +2999,7 @@ func (c *Collector) collectDatabases(ctx *types.ScrapeContext, logger *slog.Logg
logger.Debug(fmt.Sprintf("mssql_databases collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_databases collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "databases")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "databases")], &dst, logger); err != nil {
return err return err
} }
@@ -3384,7 +3384,7 @@ func (c *Collector) collectGeneralStatistics(ctx *types.ScrapeContext, logger *s
logger.Debug(fmt.Sprintf("mssql_genstats collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_genstats collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "genstats")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "genstats")], &dst, logger); err != nil {
return err return err
} }
@@ -3580,7 +3580,7 @@ func (c *Collector) collectLocks(ctx *types.ScrapeContext, logger *slog.Logger,
logger.Debug(fmt.Sprintf("mssql_locks collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_locks collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "locks")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "locks")], &dst, logger); err != nil {
return err return err
} }
@@ -3681,7 +3681,7 @@ func (c *Collector) collectMemoryManager(ctx *types.ScrapeContext, logger *slog.
logger.Debug(fmt.Sprintf("mssql_memmgr collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_memmgr collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "memmgr")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "memmgr")], &dst, logger); err != nil {
return err return err
} }
@@ -3851,7 +3851,7 @@ func (c *Collector) collectSQLStats(ctx *types.ScrapeContext, logger *slog.Logge
logger.Debug(fmt.Sprintf("mssql_sqlstats collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_sqlstats collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlstats")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlstats")], &dst, logger); err != nil {
return err return err
} }
@@ -3960,7 +3960,7 @@ func (c *Collector) collectWaitStats(ctx *types.ScrapeContext, logger *slog.Logg
logger.Debug(fmt.Sprintf("mssql_waitstats collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_waitstats collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "waitstats")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "waitstats")], &dst, logger); err != nil {
return err return err
} }
@@ -4067,7 +4067,7 @@ func (c *Collector) collectSQLErrors(ctx *types.ScrapeContext, logger *slog.Logg
logger.Debug(fmt.Sprintf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlerrors")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlerrors")], &dst, logger); err != nil {
return err return err
} }
@@ -4112,7 +4112,7 @@ func (c *Collector) collectTransactions(ctx *types.ScrapeContext, logger *slog.L
logger.Debug(fmt.Sprintf("mssql_transactions collector iterating sql instance %s.", sqlInstance)) logger.Debug(fmt.Sprintf("mssql_transactions collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "transactions")], &dst, logger); err != nil { if err := v1.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "transactions")], &dst, logger); err != nil {
return err return err
} }

View File

@@ -3,8 +3,8 @@ package mssql_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/mssql" "github.com/prometheus-community/windows_exporter/internal/collector/mssql"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,36 @@
package net
// Performance counter names of the "Network Interface" object, used both as
// PDH counter names (Build) and as perflib struct-tag values (perflibNetworkInterface).
const (
	BytesReceivedPerSec      = "Bytes Received/sec"
	BytesSentPerSec          = "Bytes Sent/sec"
	BytesTotalPerSec         = "Bytes Total/sec"
	OutputQueueLength        = "Output Queue Length"
	PacketsOutboundDiscarded = "Packets Outbound Discarded"
	PacketsOutboundErrors    = "Packets Outbound Errors"
	PacketsPerSec            = "Packets/sec"
	PacketsReceivedDiscarded = "Packets Received Discarded"
	PacketsReceivedErrors    = "Packets Received Errors"
	PacketsReceivedPerSec    = "Packets Received/sec"
	PacketsReceivedUnknown   = "Packets Received Unknown"
	PacketsSentPerSec        = "Packets Sent/sec"
	CurrentBandwidth         = "Current Bandwidth"
)
// perflibNetworkInterface is the unmarshal target for the perflib
// "Network Interface" object (one element per NIC instance).
//
// Win32_PerfRawData_Tcpip_NetworkInterface docs:
// - https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)
type perflibNetworkInterface struct {
	BytesReceivedPerSec      float64 `perflib:"Bytes Received/sec"`
	BytesSentPerSec          float64 `perflib:"Bytes Sent/sec"`
	BytesTotalPerSec         float64 `perflib:"Bytes Total/sec"`
	// Name carries the perflib instance name (the NIC name); it has no
	// counter tag and is filled by the unmarshaler.
	Name                     string
	OutputQueueLength        float64 `perflib:"Output Queue Length"`
	PacketsOutboundDiscarded float64 `perflib:"Packets Outbound Discarded"`
	PacketsOutboundErrors    float64 `perflib:"Packets Outbound Errors"`
	PacketsPerSec            float64 `perflib:"Packets/sec"`
	PacketsReceivedDiscarded float64 `perflib:"Packets Received Discarded"`
	PacketsReceivedErrors    float64 `perflib:"Packets Received Errors"`
	PacketsReceivedPerSec    float64 `perflib:"Packets Received/sec"`
	PacketsReceivedUnknown   float64 `perflib:"Packets Received Unknown"`
	PacketsSentPerSec        float64 `perflib:"Packets Sent/sec"`
	CurrentBandwidth         float64 `perflib:"Current Bandwidth"`
}

View File

@@ -0,0 +1,599 @@
//go:build windows
package net
import (
"errors"
"fmt"
"log/slog"
"os"
"regexp"
"slices"
"strings"
"unsafe"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
)
// Name is the registered name of this collector.
const Name = "net"

// Config holds the user-facing configuration of the net collector.
type Config struct {
	// NicExclude excludes NICs whose name matches the pattern.
	NicExclude *regexp.Regexp `yaml:"nic_exclude"`
	// NicInclude restricts collection to NICs whose name matches the pattern.
	NicInclude *regexp.Regexp `yaml:"nic_include"`
	// CollectorsEnabled lists the enabled sub-collectors
	// ("metrics", "nic_addresses").
	CollectorsEnabled []string `yaml:"collectors_enabled"`
}

// ConfigDefaults enables both sub-collectors and applies no NIC filtering.
var ConfigDefaults = Config{
	NicExclude: types.RegExpEmpty,
	NicInclude: types.RegExpAny,
	CollectorsEnabled: []string{
		"metrics",
		"nic_addresses",
	},
}
// A Collector is a Prometheus Collector for Perflib Network Interface metrics.
type Collector struct {
	config Config

	// perfDataCollector is only initialized in Build when PDH-based
	// collection is enabled (see utils.PDHEnabled).
	perfDataCollector perfdata.Collector

	// Metric descriptors for the per-NIC traffic counters/gauges.
	bytesReceivedTotal       *prometheus.Desc
	bytesSentTotal           *prometheus.Desc
	bytesTotal               *prometheus.Desc
	outputQueueLength        *prometheus.Desc
	packetsOutboundDiscarded *prometheus.Desc
	packetsOutboundErrors    *prometheus.Desc
	packetsTotal             *prometheus.Desc
	packetsReceivedDiscarded *prometheus.Desc
	packetsReceivedErrors    *prometheus.Desc
	packetsReceivedTotal     *prometheus.Desc
	packetsReceivedUnknown   *prometheus.Desc
	packetsSentTotal         *prometheus.Desc
	currentBandwidth         *prometheus.Desc

	// Descriptors for the info-style metrics emitted by the
	// nic_addresses sub-collector.
	nicAddressInfo *prometheus.Desc
	routeInfo      *prometheus.Desc
}
// New returns a net Collector for the supplied configuration. A nil config,
// or any nil field inside it, falls back to the corresponding
// ConfigDefaults value.
func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	if config.NicInclude == nil {
		config.NicInclude = ConfigDefaults.NicInclude
	}

	if config.NicExclude == nil {
		config.NicExclude = ConfigDefaults.NicExclude
	}

	if config.CollectorsEnabled == nil {
		config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
	}

	return &Collector{config: *config}
}
// NewWithFlags builds a Collector and registers its command-line flags on the
// given kingpin application. Flag values are copied into the collector's
// config by an app.Action callback that runs after flag parsing.
func NewWithFlags(app *kingpin.Application) *Collector {
	c := &Collector{
		config: ConfigDefaults,
	}

	// Start empty; the enabled sub-collectors are filled in from the
	// --collector.net.enabled flag in the Action callback below.
	c.config.CollectorsEnabled = make([]string, 0)

	var nicExclude, nicInclude string

	var collectorsEnabled string

	app.Flag(
		"collector.net.nic-exclude",
		"Regexp of NIC:s to exclude. NIC name must both match include and not match exclude to be included.",
	).Default("").StringVar(&nicExclude)

	app.Flag(
		"collector.net.nic-include",
		"Regexp of NIC:s to include. NIC name must both match include and not match exclude to be included.",
	).Default(".+").StringVar(&nicInclude)

	app.Flag(
		"collector.net.enabled",
		"Comma-separated list of collectors to use. Defaults to all, if not specified.",
	).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)

	app.Action(func(*kingpin.ParseContext) error {
		c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")

		var err error

		// Anchor the user-supplied patterns so they must match the whole
		// NIC name, not just a substring.
		c.config.NicExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", nicExclude))
		if err != nil {
			return fmt.Errorf("collector.net.nic-exclude: %w", err)
		}

		c.config.NicInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", nicInclude))
		if err != nil {
			return fmt.Errorf("collector.net.nic-include: %w", err)
		}

		return nil
	})

	return c
}
// GetName returns the registered name of the collector.
func (c *Collector) GetName() string {
	return Name
}
// GetPerfCounter returns the perflib object names this collector consumes.
// When PDH-based collection is enabled, no perflib objects are needed.
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	if utils.PDHEnabled() {
		return []string{}, nil
	}

	return []string{"Network Interface"}, nil
}
// Close releases resources held by the collector; this collector holds none.
func (c *Collector) Close(_ *slog.Logger) error {
	return nil
}
// Build initializes the collector: when PDH-based collection is enabled it
// creates the "Network Interface" PDH collector, and in all cases it registers
// the metric descriptors. The *mi.Session parameter is unused here.
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
	if utils.PDHEnabled() {
		counters := []string{
			BytesReceivedPerSec,
			BytesSentPerSec,
			BytesTotalPerSec,
			OutputQueueLength,
			PacketsOutboundDiscarded,
			PacketsOutboundErrors,
			PacketsPerSec,
			PacketsReceivedDiscarded,
			PacketsReceivedErrors,
			PacketsReceivedPerSec,
			PacketsReceivedUnknown,
			PacketsSentPerSec,
			CurrentBandwidth,
		}

		var err error

		c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Network Interface", perfdata.AllInstances, counters)
		if err != nil {
			// BUGFIX: the message previously said "Processor Information",
			// a copy/paste left-over from another collector.
			return fmt.Errorf("failed to create Network Interface collector: %w", err)
		}
	}

	// BUGFIX: this previously checked for "addresses", which is not a valid
	// sub-collector name; the name used by ConfigDefaults and Collect is
	// "nic_addresses", so the experimental-state warning never fired.
	if slices.Contains(c.config.CollectorsEnabled, "nic_addresses") {
		logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.",
			slog.String("collector", Name),
		)
	}

	c.bytesReceivedTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "bytes_received_total"),
		"(Network.BytesReceivedPerSec)",
		[]string{"nic"},
		nil,
	)
	c.bytesSentTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "bytes_sent_total"),
		"(Network.BytesSentPerSec)",
		[]string{"nic"},
		nil,
	)
	c.bytesTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "bytes_total"),
		"(Network.BytesTotalPerSec)",
		[]string{"nic"},
		nil,
	)
	c.outputQueueLength = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "output_queue_length_packets"),
		"(Network.OutputQueueLength)",
		[]string{"nic"},
		nil,
	)
	c.packetsOutboundDiscarded = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "packets_outbound_discarded_total"),
		"(Network.PacketsOutboundDiscarded)",
		[]string{"nic"},
		nil,
	)
	c.packetsOutboundErrors = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "packets_outbound_errors_total"),
		"(Network.PacketsOutboundErrors)",
		[]string{"nic"},
		nil,
	)
	c.packetsReceivedDiscarded = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "packets_received_discarded_total"),
		"(Network.PacketsReceivedDiscarded)",
		[]string{"nic"},
		nil,
	)
	c.packetsReceivedErrors = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "packets_received_errors_total"),
		"(Network.PacketsReceivedErrors)",
		[]string{"nic"},
		nil,
	)
	c.packetsReceivedTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
		"(Network.PacketsReceivedPerSec)",
		[]string{"nic"},
		nil,
	)
	c.packetsReceivedUnknown = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "packets_received_unknown_total"),
		"(Network.PacketsReceivedUnknown)",
		[]string{"nic"},
		nil,
	)
	c.packetsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "packets_total"),
		"(Network.PacketsPerSec)",
		[]string{"nic"},
		nil,
	)
	c.packetsSentTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "packets_sent_total"),
		"(Network.PacketsSentPerSec)",
		[]string{"nic"},
		nil,
	)
	c.currentBandwidth = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "current_bandwidth_bytes"),
		"(Network.CurrentBandwidth)",
		[]string{"nic"},
		nil,
	)
	c.nicAddressInfo = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "nic_address_info"),
		"A metric with a constant '1' value labeled with the network interface's address information.",
		[]string{"nic", "friendly_name", "address", "family"},
		nil,
	)
	c.routeInfo = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "route_info"),
		"A metric with a constant '1' value labeled with the network interface's route information.",
		[]string{"nic", "src", "dest", "metric"},
		nil,
	)

	return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	logger = logger.With(slog.String("collector", Name))

	if slices.Contains(c.config.CollectorsEnabled, "metrics") {
		// Pick the collection backend once, then run it.
		collectFn := func() error { return c.collect(ctx, logger, ch) }
		if utils.PDHEnabled() {
			collectFn = func() error { return c.collectPDH(ch) }
		}

		if err := collectFn(); err != nil {
			return fmt.Errorf("failed collecting net metrics: %w", err)
		}
	}

	if slices.Contains(c.config.CollectorsEnabled, "nic_addresses") {
		if err := c.collectNICAddresses(ch); err != nil {
			return fmt.Errorf("failed collecting net addresses: %w", err)
		}
	}

	return nil
}
// collect emits the per-NIC metrics from the perflib "Network Interface"
// object for every NIC that passes the include/exclude filters.
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	var nics []perflibNetworkInterface
	if err := v1.UnmarshalObject(ctx.PerfObjects["Network Interface"], &nics, logger); err != nil {
		return err
	}

	for _, nic := range nics {
		if c.config.NicExclude.MatchString(nic.Name) ||
			!c.config.NicInclude.MatchString(nic.Name) {
			continue
		}

		// Emit every metric for this NIC from a single table; the emission
		// order matches the original hand-written sequence.
		for _, metric := range []struct {
			desc      *prometheus.Desc
			valueType prometheus.ValueType
			value     float64
		}{
			{c.bytesReceivedTotal, prometheus.CounterValue, nic.BytesReceivedPerSec},
			{c.bytesSentTotal, prometheus.CounterValue, nic.BytesSentPerSec},
			{c.bytesTotal, prometheus.CounterValue, nic.BytesTotalPerSec},
			{c.outputQueueLength, prometheus.GaugeValue, nic.OutputQueueLength},
			{c.packetsOutboundDiscarded, prometheus.CounterValue, nic.PacketsOutboundDiscarded},
			{c.packetsOutboundErrors, prometheus.CounterValue, nic.PacketsOutboundErrors},
			{c.packetsTotal, prometheus.CounterValue, nic.PacketsPerSec},
			{c.packetsReceivedDiscarded, prometheus.CounterValue, nic.PacketsReceivedDiscarded},
			{c.packetsReceivedErrors, prometheus.CounterValue, nic.PacketsReceivedErrors},
			{c.packetsReceivedTotal, prometheus.CounterValue, nic.PacketsReceivedPerSec},
			{c.packetsReceivedUnknown, prometheus.CounterValue, nic.PacketsReceivedUnknown},
			{c.packetsSentTotal, prometheus.CounterValue, nic.PacketsSentPerSec},
			// Current bandwidth is reported in bits/s; divide by 8 for bytes.
			{c.currentBandwidth, prometheus.GaugeValue, nic.CurrentBandwidth / 8},
		} {
			ch <- prometheus.MustNewConstMetric(metric.desc, metric.valueType, metric.value, nic.Name)
		}
	}

	return nil
}
// collectPDH emits the per-NIC metrics using the PDH-based perfdata collector
// for every NIC instance that passes the include/exclude filters.
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
	data, err := c.perfDataCollector.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect Network Information metrics: %w", err)
	}

	for nicName, nicData := range data {
		if c.config.NicExclude.MatchString(nicName) ||
			!c.config.NicInclude.MatchString(nicName) {
			continue
		}

		// Emit every metric for this NIC from a single table; the emission
		// order matches the original hand-written sequence.
		for _, metric := range []struct {
			desc      *prometheus.Desc
			valueType prometheus.ValueType
			value     float64
		}{
			{c.bytesReceivedTotal, prometheus.CounterValue, nicData[BytesReceivedPerSec].FirstValue},
			{c.bytesSentTotal, prometheus.CounterValue, nicData[BytesSentPerSec].FirstValue},
			{c.bytesTotal, prometheus.CounterValue, nicData[BytesTotalPerSec].FirstValue},
			{c.outputQueueLength, prometheus.GaugeValue, nicData[OutputQueueLength].FirstValue},
			{c.packetsOutboundDiscarded, prometheus.CounterValue, nicData[PacketsOutboundDiscarded].FirstValue},
			{c.packetsOutboundErrors, prometheus.CounterValue, nicData[PacketsOutboundErrors].FirstValue},
			{c.packetsTotal, prometheus.CounterValue, nicData[PacketsPerSec].FirstValue},
			{c.packetsReceivedDiscarded, prometheus.CounterValue, nicData[PacketsReceivedDiscarded].FirstValue},
			{c.packetsReceivedErrors, prometheus.CounterValue, nicData[PacketsReceivedErrors].FirstValue},
			{c.packetsReceivedTotal, prometheus.CounterValue, nicData[PacketsReceivedPerSec].FirstValue},
			{c.packetsReceivedUnknown, prometheus.CounterValue, nicData[PacketsReceivedUnknown].FirstValue},
			{c.packetsSentTotal, prometheus.CounterValue, nicData[PacketsSentPerSec].FirstValue},
			// Current bandwidth is reported in bits/s; divide by 8 for bytes.
			{c.currentBandwidth, prometheus.GaugeValue, nicData[CurrentBandwidth].FirstValue / 8},
		} {
			ch <- prometheus.MustNewConstMetric(metric.desc, metric.valueType, metric.value, nicName)
		}
	}

	return nil
}
// addressFamily maps Windows socket address families to the label values
// used by the nic_address_info metric's "family" label.
var addressFamily = map[uint16]string{
	windows.AF_INET:  "ipv4",
	windows.AF_INET6: "ipv6",
}
// collectNICAddresses emits one nic_address_info metric per global-unicast
// unicast and anycast address of each NIC that passes the filters.
func (c *Collector) collectNICAddresses(ch chan<- prometheus.Metric) error {
	nicAdapterAddresses, err := adapterAddresses()
	if err != nil {
		return err
	}

	// Parentheses in NIC names are rewritten to brackets so the "nic" label
	// uses the same form as the perflib-based metrics.
	convertNicName := strings.NewReplacer("(", "[", ")", "]")

	for _, adapter := range nicAdapterAddresses {
		friendlyName := windows.UTF16PtrToString(adapter.FriendlyName)
		nicName := windows.UTF16PtrToString(adapter.Description)

		if c.config.NicExclude.MatchString(nicName) ||
			!c.config.NicInclude.MatchString(nicName) {
			continue
		}

		labelNicName := convertNicName.Replace(nicName)

		for unicast := adapter.FirstUnicastAddress; unicast != nil; unicast = unicast.Next {
			c.sendNICAddressInfo(ch, labelNicName, friendlyName, unicast.Address)
		}

		for anycast := adapter.FirstAnycastAddress; anycast != nil; anycast = anycast.Next {
			c.sendNICAddressInfo(ch, labelNicName, friendlyName, anycast.Address)
		}
	}

	return nil
}

// sendNICAddressInfo emits a single nic_address_info metric for addr,
// skipping addresses that are nil or not global unicast.
func (c *Collector) sendNICAddressInfo(ch chan<- prometheus.Metric, nicName, friendlyName string, addr windows.SocketAddress) {
	ipAddr := addr.IP()
	if ipAddr == nil || !ipAddr.IsGlobalUnicast() {
		return
	}

	ch <- prometheus.MustNewConstMetric(
		c.nicAddressInfo,
		prometheus.GaugeValue,
		1,
		nicName,
		friendlyName,
		ipAddr.String(),
		addressFamily[addr.Sockaddr.Addr.Family],
	)
}
// adapterAddresses returns a list of IP adapter and address
// structures. The structure contains an IP adapter and flattened
// multiple IP addresses including unicast, anycast and multicast
// addresses.
func adapterAddresses() ([]*windows.IpAdapterAddresses, error) {
	var b []byte

	l := uint32(15000) // recommended initial size

	for {
		b = make([]byte, l)

		const flags = windows.GAA_FLAG_SKIP_MULTICAST | windows.GAA_FLAG_SKIP_DNS_SERVER

		// On ERROR_BUFFER_OVERFLOW, GetAdaptersAddresses updates l to the
		// required buffer size, so the next iteration retries with a
		// larger buffer.
		err := windows.GetAdaptersAddresses(windows.AF_UNSPEC, flags, 0, (*windows.IpAdapterAddresses)(unsafe.Pointer(&b[0])), &l)
		if err == nil {
			if l == 0 {
				// No adapters present.
				return nil, nil
			}

			break
		}

		if !errors.Is(err, windows.ERROR_BUFFER_OVERFLOW) {
			return nil, os.NewSyscallError("getadaptersaddresses", err)
		}

		if l <= uint32(len(b)) {
			// Overflow reported without requesting a larger buffer; bail
			// out instead of looping forever.
			return nil, os.NewSyscallError("getadaptersaddresses", err)
		}
	}

	// Walk the linked list laid out inside b and collect a pointer to each
	// entry. The returned pointers alias b's backing array, which stays
	// alive as long as the pointers do.
	var addresses []*windows.IpAdapterAddresses
	for address := (*windows.IpAdapterAddresses)(unsafe.Pointer(&b[0])); address != nil; address = address.Next {
		addresses = append(addresses, address)
	}

	return addresses, nil
}

View File

@@ -6,8 +6,8 @@ import (
"testing" "testing"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/collector/net" "github.com/prometheus-community/windows_exporter/internal/collector/net"
"github.com/prometheus-community/windows_exporter/pkg/testutils" "github.com/prometheus-community/windows_exporter/internal/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {

View File

@@ -0,0 +1,14 @@
//go:build windows
package net_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/net"
"github.com/prometheus-community/windows_exporter/internal/testutils"
)
// TestCollector runs the shared collector test harness against the net
// collector with its default (nil) configuration.
func TestCollector(t *testing.T) {
	testutils.TestCollector(t, net.New, nil)
}

View File

@@ -9,9 +9,9 @@ import (
"slices" "slices"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "netframework" const Name = "netframework"
@@ -47,7 +47,7 @@ const (
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics.
type Collector struct { type Collector struct {
config Config config Config
wmiClient *wmi.Client miSession *mi.Session
// clrexceptions // clrexceptions
numberOfExceptionsThrown *prometheus.Desc numberOfExceptionsThrown *prometheus.Desc
@@ -143,12 +143,12 @@ func (c *Collector) Close(_ *slog.Logger) error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error { func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil { if miSession == nil {
return errors.New("wmiClient or SWbemServicesClient is nil") return errors.New("miSession is nil")
} }
c.wmiClient = wmiClient c.miSession = miSession
if slices.Contains(c.config.CollectorsEnabled, collectorClrExceptions) { if slices.Contains(c.config.CollectorsEnabled, collectorClrExceptions) {
c.buildClrExceptions() c.buildClrExceptions()

View File

@@ -3,7 +3,11 @@
package netframework package netframework
import ( import (
"github.com/prometheus-community/windows_exporter/pkg/types" "fmt"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@@ -35,19 +39,19 @@ func (c *Collector) buildClrExceptions() {
} }
type Win32_PerfRawData_NETFramework_NETCLRExceptions struct { type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {
Name string Name string `mi:"Name"`
NumberofExcepsThrown uint32 NumberofExcepsThrown uint32 `mi:"NumberofExcepsThrown"`
NumberofExcepsThrownPersec uint32 NumberofExcepsThrownPersec uint32 `mi:"NumberofExcepsThrownPersec"`
NumberofFiltersPersec uint32 NumberofFiltersPersec uint32 `mi:"NumberofFiltersPersec"`
NumberofFinallysPersec uint32 NumberofFinallysPersec uint32 `mi:"NumberofFinallysPersec"`
ThrowToCatchDepthPersec uint32 ThrowToCatchDepthPersec uint32 `mi:"ThrowToCatchDepthPersec"`
} }
func (c *Collector) collectClrExceptions(ch chan<- prometheus.Metric) error { func (c *Collector) collectClrExceptions(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRExceptions", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRExceptions"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, process := range dst { for _, process := range dst {

View File

@@ -3,7 +3,11 @@
package netframework package netframework
import ( import (
"github.com/prometheus-community/windows_exporter/pkg/types" "fmt"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@@ -29,19 +33,19 @@ func (c *Collector) buildClrInterop() {
} }
type Win32_PerfRawData_NETFramework_NETCLRInterop struct { type Win32_PerfRawData_NETFramework_NETCLRInterop struct {
Name string Name string `mi:"Name"`
NumberofCCWs uint32 NumberofCCWs uint32 `mi:"NumberofCCWs"`
Numberofmarshalling uint32 Numberofmarshalling uint32 `mi:"Numberofmarshalling"`
NumberofStubs uint32 NumberofStubs uint32 `mi:"NumberofStubs"`
NumberofTLBexportsPersec uint32 NumberofTLBexportsPersec uint32 `mi:"NumberofTLBexportsPersec"`
NumberofTLBimportsPersec uint32 NumberofTLBimportsPersec uint32 `mi:"NumberofTLBimportsPersec"`
} }
func (c *Collector) collectClrInterop(ch chan<- prometheus.Metric) error { func (c *Collector) collectClrInterop(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRInterop", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRInterop"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, process := range dst { for _, process := range dst {

View File

@@ -3,7 +3,11 @@
package netframework package netframework
import ( import (
"github.com/prometheus-community/windows_exporter/pkg/types" "fmt"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@@ -35,21 +39,21 @@ func (c *Collector) buildClrJIT() {
} }
type Win32_PerfRawData_NETFramework_NETCLRJit struct { type Win32_PerfRawData_NETFramework_NETCLRJit struct {
Name string Name string `mi:"Name"`
Frequency_PerfTime uint32 Frequency_PerfTime uint32 `mi:"Frequency_PerfTime"`
ILBytesJittedPersec uint32 ILBytesJittedPersec uint32 `mi:"ILBytesJittedPersec"`
NumberofILBytesJitted uint32 NumberofILBytesJitted uint32 `mi:"NumberofILBytesJitted"`
NumberofMethodsJitted uint32 NumberofMethodsJitted uint32 `mi:"NumberofMethodsJitted"`
PercentTimeinJit uint32 PercentTimeinJit uint32 `mi:"PercentTimeinJit"`
StandardJitFailures uint32 StandardJitFailures uint32 `mi:"StandardJitFailures"`
TotalNumberofILBytesJitted uint32 TotalNumberofILBytesJitted uint32 `mi:"TotalNumberofILBytesJitted"`
} }
func (c *Collector) collectClrJIT(ch chan<- prometheus.Metric) error { func (c *Collector) collectClrJIT(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRJit var dst []Win32_PerfRawData_NETFramework_NETCLRJit
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRJit", &dst); err != nil { if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRJit"))); err != nil {
return err return fmt.Errorf("WMI query failed: %w", err)
} }
for _, process := range dst { for _, process := range dst {

Some files were not shown because too many files have changed in this diff Show More