Compare commits


1 Commit

Author: Ashok Siyani
SHA1: d9404c6cc2
Message: added terminal services collector for number of session count
Date: 2020-03-23 09:40:40 +00:00
209 changed files with 5742 additions and 23332 deletions

1
.github/CODEOWNERS vendored

@@ -1 +0,0 @@
* @prometheus-community/windows_exporter-reviewers

.github/dependabot.yml

@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"

.github/workflows/lint.yml

@@ -1,105 +0,0 @@
name: Linting
# Trigger on pull requests and pushes to master branch where Go-related files
# have been changed.
on:
push:
paths:
- "go.mod"
- "go.sum"
- "**.go"
- ".github/workflows/lint.yml"
- "tools/e2e-output.txt"
branches:
- master
pull_request:
paths:
- "go.mod"
- "go.sum"
- "**.go"
- ".github/workflows/lint.yml"
- "tools/e2e-output.txt"
branches:
- master
env:
PROMU_VER: '0.14.0'
PROMTOOL_VER: '2.43.0'
jobs:
test:
runs-on: windows-2019
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '^1.20.2'
- name: Test
run: make test
- name: Install e2e deps
run: |
Invoke-WebRequest -Uri https://github.com/prometheus/promu/releases/download/v$($Env:PROMU_VER)/promu-$($Env:PROMU_VER).windows-amd64.zip -OutFile promu-$($Env:PROMU_VER).windows-amd64.zip
Expand-Archive -Path promu-$($Env:PROMU_VER).windows-amd64.zip -DestinationPath .
Copy-Item -Path promu-$($Env:PROMU_VER).windows-amd64\promu.exe -Destination "$(go env GOPATH)\bin"
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.2.0
# GOPATH\bin dir must be appended to PATH else the `promu` command won't be found
echo "$(go env GOPATH)\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: e2e Test
run: make e2e-test
promtool:
runs-on: windows-2019
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '^1.20.2'
- name: Install promtool
run: |
Invoke-WebRequest -Uri https://github.com/prometheus/prometheus/releases/download/v$($Env:PROMTOOL_VER)/prometheus-$($Env:PROMTOOL_VER).windows-amd64.zip -OutFile prometheus-$($Env:PROMTOOL_VER).windows-amd64.zip
Expand-Archive -Path prometheus-$($Env:PROMTOOL_VER).windows-amd64.zip -DestinationPath .
Copy-Item -Path prometheus-$($Env:PROMTOOL_VER).windows-amd64\promtool.exe -Destination "$(go env GOPATH)\bin"
Invoke-WebRequest -Uri https://github.com/prometheus/promu/releases/download/v$($Env:PROMU_VER)/promu-$($Env:PROMU_VER).windows-amd64.zip -OutFile promu-$($Env:PROMU_VER).windows-amd64.zip
Expand-Archive -Path promu-$($Env:PROMU_VER).windows-amd64.zip -DestinationPath .
Copy-Item -Path promu-$($Env:PROMU_VER).windows-amd64\promu.exe -Destination "$(go env GOPATH)\bin"
# No binaries available so build from source
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.2.0
# GOPATH\bin dir must be appended to PATH else the `promu` command won't be found
echo "$(go env GOPATH)\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Promtool
run: make promtool
lint:
runs-on: windows-2022
steps:
# `gofmt` linter run by golangci-lint fails on CRLF line endings (the default for Windows)
- name: Set git to use LF
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '^1.20.2'
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.51.2
args: "--timeout=5m"
# golangci-lint action doesn't always provide helpful output, so re-run without the action for
# better output of the problem.
# The cache from the golangci-lint step is re-used here, so this step should finish quickly.
- name: errors
if: ${{ failure() }}
run: golangci-lint run --timeout=5m -c .golangci.yaml

.github/workflows/release.yml

@@ -1,106 +0,0 @@
name: Releases
# Trigger on releases.
on:
release:
types:
- published
- edited
permissions:
contents: write
packages: write
env:
PROMU_VER: '0.14.0'
jobs:
build:
runs-on: windows-2022
steps:
- uses: actions/checkout@v3
with:
# fetch-depth required for gitversion in `Build` step
fetch-depth: 0
- uses: actions/setup-go@v3
with:
go-version: '^1.20.2'
- name: Install Build deps
run: |
dotnet tool install --global GitVersion.Tool --version 5.*
Invoke-WebRequest -Uri https://github.com/prometheus/promu/releases/download/v$($Env:PROMU_VER)/promu-$($Env:PROMU_VER).windows-amd64.zip -OutFile promu-$($Env:PROMU_VER).windows-amd64.zip
Expand-Archive -Path promu-$($Env:PROMU_VER).windows-amd64.zip -DestinationPath .
Copy-Item -Path promu-$($Env:PROMU_VER).windows-amd64\promu.exe -Destination "$(go env GOPATH)\bin"
# No binaries available so build from source
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.4.0
# GOPATH\bin dir must be added to PATH else the `promu` and `goversioninfo` commands won't be found
echo "$(go env GOPATH)\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Build
run: |
$ErrorActionPreference = "Stop"
dotnet-gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
$Version = Get-Content VERSION
# Windows versioninfo resources need the file version by parts (but product version is free text)
$VersionParts = ($Version -replace '^v?([0-9\.]+).*$','$1').Split(".")
goversioninfo.exe -ver-major $VersionParts[0] -ver-minor $VersionParts[1] -ver-patch $VersionParts[2] -product-version $Version -platform-specific
make crossbuild
# '+' symbols are invalid characters in image tags
(Get-Content -Path VERSION) -replace '\+', '_' | Set-Content -Path VERSION
make build-all
# GH requires all files to have different names, so add version/arch to differentiate
foreach($Arch in "amd64", "arm64","386") {
Move-Item output\$Arch\windows_exporter.exe output\windows_exporter-$Version-$Arch.exe
}
- name: Upload Artifacts
uses: actions/upload-artifact@v3
with:
name: windows_exporter_binaries
path: output\windows_exporter-*.exe
- name: Build Release Artifacts
if: startsWith(github.ref, 'refs/tags/')
run: |
$ErrorActionPreference = "Stop"
$BuildVersion = Get-Content VERSION
$TagName = $env:GITHUB_REF -replace 'refs/tags/', ''
# The MSI version is not semver compliant, so just take the numerical parts
$MSIVersion = $TagName -replace '^v?([0-9\.]+).*$','$1'
foreach($Arch in "amd64", "386") {
Write-Verbose "Building windows_exporter $MSIVersion msi for $Arch"
.\installer\build.ps1 -PathToExecutable .\output\windows_exporter-$BuildVersion-$Arch.exe -Version $MSIVersion -Arch "$Arch"
Move-Item installer\Output\windows_exporter-$MSIVersion-$Arch.msi output\
}
promu checksum output\
- name: Login to GitHub container registry
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Latest image
if: ${{ github.event_name != 'pull_request' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
$Env:VERSION = 'latest'
make push-all
- name: Release
if: startsWith(github.ref, 'refs/tags/')
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
$TagName = $env:GITHUB_REF -replace 'refs/tags/', ''
Get-ChildItem -Path output\* -Include @('windows_exporter*.msi', 'windows_exporter*.exe', 'sha256sums.txt') | Foreach-Object {gh release upload $TagName $_}
make push-all

(spell-checking workflow under .github/workflows/)

@@ -1,26 +0,0 @@
name: Spell checking
# Trigger on pull requests, and pushes to master branch.
on:
push:
branches:
- master
pull_request:
branches:
- master
env:
PROMU_VER: 'v0.14.0'
jobs:
codespell:
name: Check for spelling errors
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: codespell-project/actions-codespell@master
with:
check_filenames: true
# When using this Action in other repos, the --skip option below can be removed
skip: ./.git,go.mod,go.sum
ignore_words_list: calle

3
.gitignore vendored

@@ -4,5 +4,4 @@ VERSION
*.un~
output/
.vscode
.idea
*.syso
.idea

.golangci.yaml

@@ -3,10 +3,11 @@ linters:
enable:
- deadcode
- errcheck
- revive
- golint
- govet
- gofmt
- ineffassign
- interfacer
- structcheck
- unconvert
- varcheck
@@ -19,7 +20,4 @@ issues:
- # Golint has many capitalisation complaints on WMI class names
text: "`?\\w+`? should be `?\\w+`?"
linters:
- revive
- text: "don't use ALL_CAPS in Go names; use CamelCase"
linters:
- revive
- golint

.promu.yml

@@ -1,10 +1,8 @@
go:
version: 1.20
repository:
path: github.com/prometheus-community/windows_exporter
path: github.com/martinlindhe/wmi_exporter
build:
binaries:
- name: windows_exporter
- name: wmi_exporter
ldflags: |
-X github.com/prometheus/common/version.Version={{.Version}}
-X github.com/prometheus/common/version.Revision={{.Revision}}
@@ -16,4 +14,5 @@ tarball:
- LICENSE
crossbuild:
platforms:
- windows
- windows/amd64
- windows/386

4
AUTHORS.md Normal file

@@ -0,0 +1,4 @@
Contributors in alphabetical order
* [Martin Lindhe](https://github.com/martinlindhe)
* [Calle Pettersson](https://github.com/carlpett)

CODE_OF_CONDUCT.md

@@ -1,3 +0,0 @@
## Prometheus Community Code of Conduct
Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

Dockerfile

@@ -1,9 +0,0 @@
# Note this image doesn't really matter for hostprocess but it is good to build per OS version
# the files in the image are copied to $env:CONTAINER_SANDBOX_MOUNT_POINT on the host
# but the file system is the Host NOT the container
ARG BASE="mcr.microsoft.com/windows/nanoserver:1809"
FROM $BASE
ENV PATH="C:\Windows\system32;C:\Windows;"
COPY output/amd64/windows_exporter.exe /windows_exporter.exe
ENTRYPOINT ["windows_exporter.exe"]

MAINTAINERS.md

@@ -1,9 +0,0 @@
Maintainers in alphabetical order
* [Ben Reedy](https://github.com/breed808) - breed808@breed808.com
* [Calle Pettersson](https://github.com/carlpett) - calle@cape.nu
Alumni
* [Brian Brazil](https://github.com/brian-brazil)
* [Martin Lindhe](https://github.com/martinlindhe)

Makefile

@@ -1,38 +1,14 @@
export GOOS=windows
export DOCKER_IMAGE_NAME ?= windows-exporter
export DOCKER_REPO ?= ghcr.io/prometheus-community
VERSION?=$(shell cat VERSION)
DOCKER?=docker
# Image Variables for Hostprocess Container
# Windows image build is heavily influenced by https://github.com/kubernetes/kubernetes/blob/master/cluster/images/etcd/Makefile
OS=1809
ALL_OS:= 1809 ltsc2022
BASE_IMAGE=mcr.microsoft.com/windows/nanoserver
.PHONY: build
build: windows_exporter.exe
windows_exporter.exe: **/*.go
build:
promu build -v
test:
go test -v ./...
bench:
go test -v -bench='benchmark(cpu|logicaldisk|logon|memory|net|process|service|system|tcp|time)collector' ./...
lint:
golangci-lint -c .golangci.yaml run
.PHONY: e2e-test
e2e-test: windows_exporter.exe
pwsh -NonInteractive -ExecutionPolicy Bypass -File .\tools\end-to-end-test.ps1
.PHONY: promtool
promtool: windows_exporter.exe
pwsh -NonInteractive -ExecutionPolicy Bypass -File .\tools\promtool.ps1
fmt:
gofmt -l -w -s .
@@ -40,25 +16,4 @@ crossbuild:
# The prometheus/golang-builder image for promu crossbuild doesn't exist
# on Windows, so for now, we'll just build twice
GOARCH=amd64 promu build --prefix=output/amd64
GOARCH=arm64 promu build --prefix=output/arm64
GOARCH=386 promu build --prefix=output/386
build-image: crossbuild
$(DOCKER) build --build-arg=BASE=$(BASE_IMAGE):$(OS) -f Dockerfile -t $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$(OS) .
sub-build-%:
$(MAKE) OS=$* build-image
build-all: $(addprefix sub-build-,$(ALL_OS))
push:
set -x; \
for osversion in ${ALL_OS}; do \
$(DOCKER) push $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
$(DOCKER) manifest create --amend $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION) $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
full_version=`$(DOCKER) manifest inspect $(BASE_IMAGE):$${osversion} | grep "os.version" | head -n 1 | awk -F\" '{print $$4}'` || true; \
$(DOCKER) manifest annotate --os windows --arch amd64 --os-version $${full_version} $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION) $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
done
$(DOCKER) manifest push --purge $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)
push-all: build-all push

134
README.md

@@ -1,8 +1,8 @@
# windows_exporter
# WMI exporter
![Build Status](https://github.com/prometheus-community/windows_exporter/workflows/windows_exporter%20CI/CD/badge.svg)
[![Build status](https://ci.appveyor.com/api/projects/status/ljwan71as6pf2joe/branch/master?svg=true)](https://ci.appveyor.com/project/martinlindhe/wmi-exporter)
A Prometheus exporter for Windows machines.
Prometheus exporter for Windows machines, using the WMI (Windows Management Instrumentation).
## Collectors
@@ -10,28 +10,16 @@ A Prometheus exporter for Windows machines.
Name | Description | Enabled by default
---------|-------------|--------------------
[ad](docs/collector.ad.md) | Active Directory Domain Services |
[adcs](docs/collector.adcs.md) | Active Directory Certificate Services |
[adfs](docs/collector.adfs.md) | Active Directory Federation Services |
[cache](docs/collector.cache.md) | Cache metrics |
[cpu](docs/collector.cpu.md) | CPU usage | ✓
[cpu_info](docs/collector.cpu_info.md) | CPU Information |
[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | ✓
[container](docs/collector.container.md) | Container metrics |
[dfsr](docs/collector.dfsr.md) | DFSR metrics |
[dhcp](docs/collector.dhcp.md) | DHCP Server |
[dns](docs/collector.dns.md) | DNS Server |
[exchange](docs/collector.exchange.md) | Exchange metrics |
[fsrmquota](docs/collector.fsrmquota.md) | Microsoft File Server Resource Manager (FSRM) Quotas collector |
[hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
[iis](docs/collector.iis.md) | IIS sites and applications |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓
[logon](docs/collector.logon.md) | User logon sessions |
[memory](docs/collector.memory.md) | Memory usage metrics |
[mscluster_cluster](docs/collector.mscluster_cluster.md) | MSCluster cluster metrics |
[mscluster_network](docs/collector.mscluster_network.md) | MSCluster network metrics |
[mscluster_node](docs/collector.mscluster_node.md) | MSCluster Node metrics |
[mscluster_resource](docs/collector.mscluster_resource.md) | MSCluster Resource metrics |
[mscluster_resourcegroup](docs/collector.mscluster_resourcegroup.md) | MSCluster ResourceGroup metrics |
[msmq](docs/collector.msmq.md) | MSMQ queues |
[mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
[netframework_clrexceptions](docs/collector.netframework_clrexceptions.md) | .NET Framework CLR Exceptions |
@@ -45,57 +33,19 @@ Name | Description | Enabled by default
[net](docs/collector.net.md) | Network interface I/O | ✓
[os](docs/collector.os.md) | OS metrics (memory, processes, users) | ✓
[process](docs/collector.process.md) | Per-process metrics |
[remote_fx](docs/collector.remote_fx.md) | RemoteFX protocol (RDP) metrics |
[scheduled_task](docs/collector.scheduled_task.md) | Scheduled Tasks metrics |
[service](docs/collector.service.md) | Service state metrics | ✓
[smtp](docs/collector.smtp.md) | IIS SMTP Server |
[system](docs/collector.system.md) | System calls | ✓
[tcp](docs/collector.tcp.md) | TCP connections |
[teradici_pcoip](docs/collector.teradici_pcoip.md) | [Teradici PCoIP](https://www.teradici.com/web-help/pcoip_wmi_specs/) session metrics |
[time](docs/collector.time.md) | Windows Time Service |
[thermalzone](docs/collector.thermalzone.md) | Thermal information
[terminal_services](docs/collector.terminal_services.md) | Terminal services (RDS)
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | ✓
[vmware_blast](docs/collector.vmware_blast.md) | VMware Blast session metrics |
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |
See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples.
### Filtering enabled collectors
The `windows_exporter` will expose all metrics from enabled collectors by default. This is the recommended way to collect metrics to avoid errors when comparing metrics of different families.
For advanced use the `windows_exporter` can be passed an optional list of collectors to filter metrics. The `collect[]` parameter may be used multiple times. In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>).
```
params:
collect[]:
- foo
- bar
```
This can be useful for having different Prometheus servers collect specific metrics from nodes.
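A minimal sketch of a complete scrape configuration using this parameter; the job name and target address below are placeholders:

```yaml
scrape_configs:
  - job_name: "windows"               # placeholder job name
    static_configs:
      - targets: ["192.0.2.10:9182"]  # placeholder exporter address
    params:
      collect[]:                      # only these collectors are scraped
        - foo
        - bar
```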
## Flags
windows_exporter accepts flags to configure certain behaviours. The ones configuring the global behaviour of the exporter are listed below, while collector-specific ones are documented in the respective collector documentation above.
Flag | Description | Default value
---------|-------------|--------------------
`--web.listen-address` | host:port for exporter. | `:9182`
`--telemetry.path` | URL path for surfacing collected metrics. | `/metrics`
`--telemetry.max-requests` | Maximum number of concurrent requests. 0 to disable. | `5`
`--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default." | `[defaults]`
`--collectors.print` | If true, print available collectors and exit. |
`--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5`
`--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None
`--config.file` | [Using a config file](#using-a-configuration-file) from path or URL | None
`--config.file.insecure-skip-verify` | Skip TLS when loading config file from URL | false
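As an illustration (flag values here are arbitrary), several of these global flags can be combined in one invocation:

```powershell
.\windows_exporter.exe `
  --web.listen-address=":9182" `
  --telemetry.path="/metrics" `
  --collectors.enabled="[defaults],process"
```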
## Installation
The latest release can be downloaded from the [releases page](https://github.com/prometheus-community/windows_exporter/releases).
The latest release can be downloaded from the [releases page](https://github.com/martinlindhe/wmi_exporter/releases).
Each release provides a .msi installer. The installer will setup the windows_exporter as a Windows service, as well as create an exception in the Windows Firewall.
Each release provides a .msi installer. The installer will setup the WMI Exporter as a Windows service, as well as create an exception in the Windows Firewall.
If the installer is run without any parameters, the exporter will run with default settings for enabled collectors, ports, etc. The following parameters are available:
@@ -106,7 +56,6 @@ Name | Description
`LISTEN_PORT` | The port to bind to. Defaults to 9182.
`METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics`
`TEXTFILE_DIR` | As the `--collector.textfile.directory` flag, provide a directory to read text files with metrics from
`REMOTE_ADDR` | Allows setting comma separated remote IP addresses for the Windows Firewall exception (allow list). Defaults to an empty string (any remote address).
`EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string.
Parameters are sent to the installer via `msiexec`. Example invocations:
@@ -122,32 +71,21 @@ msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,service --% EXTRA_FLAGS="--c
On some older versions of Windows you may need to surround parameter values with double quotes to get the install command parsing properly:
```powershell
msiexec /i C:\Users\Administrator\Downloads\windows_exporter.msi ENABLED_COLLECTORS="ad,iis,logon,memory,process,tcp,thermalzone" TEXTFILE_DIR="C:\custom_metrics\"
msiexec /i C:\Users\Administrator\Downloads\wmi_exporter.msi ENABLED_COLLECTORS="ad,iis,logon,memory,process,tcp,thermalzone" TEXTFILE_DIR="C:\custom_metrics\"
```
Powershell versions 7.3 and above require [PSNativeCommandArgumentPassing](https://learn.microsoft.com/en-us/powershell/scripting/learn/experimental-features?view=powershell-7.3) to be set to `Legacy` when using `--% EXTRA_FLAGS`:
## Roadmap
```powershell
$PSNativeCommandArgumentPassing = 'Legacy'
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,service --% EXTRA_FLAGS="--collector.service.services-where ""Name LIKE 'sql%'"""
```
See [open issues](https://github.com/martinlindhe/wmi_exporter/issues)
## Kubernetes Implementation
See detailed steps to install on Windows Kubernetes [here](./kubernetes/kubernetes.md).
## Supported versions
windows_exporter supports Windows Server versions 2008R2 and later, and desktop Windows version 7 and later.
## Usage
go get -u github.com/prometheus/promu
go get -u github.com/prometheus-community/windows_exporter
cd $env:GOPATH/src/github.com/prometheus-community/windows_exporter
promu build -v
.\windows_exporter.exe
go get -u github.com/martinlindhe/wmi_exporter
cd $env:GOPATH/src/github.com/martinlindhe/wmi_exporter
promu build -v .
.\wmi_exporter.exe
The prometheus metrics will be exposed on [localhost:9182](http://localhost:9182)
@@ -155,57 +93,17 @@ The prometheus metrics will be exposed on [localhost:9182](http://localhost:9182
### Enable only service collector and specify a custom query
.\windows_exporter.exe --collectors.enabled "service" --collector.service.services-where "Name='windows_exporter'"
.\wmi_exporter.exe --collectors.enabled "service" --collector.service.services-where "Name='wmi_exporter'"
### Enable only process collector and specify a custom query
.\windows_exporter.exe --collectors.enabled "process" --collector.process.include="firefox.+"
.\wmi_exporter.exe --collectors.enabled "process" --collector.process.processes-where "Name LIKE 'firefox%'"
When there are multiple processes with the same name, WMI represents those after the first instance as `process-name#index`. So to get them all, rather than just the first one, the [regular expression](https://en.wikipedia.org/wiki/Regular_expression) must use `.+`. See [process](docs/collector.process.md) for more information.
When there are multiple processes with the same name, WMI represents those after the first instance as `process-name#index`. So to get them all, rather than just the first one, the query needs to be a wildcard search using a `%` character.
### Using [defaults] with `--collectors.enabled` argument
Please note that in Windows batch scripts (and when using the `cmd` command prompt), the `%` character is reserved, so it has to be escaped with another `%`. For example, the wildcard syntax for searching for all firefox processes is `firefox%%`.
Using `[defaults]` with `--collectors.enabled` argument which gets expanded with all default collectors.
.\windows_exporter.exe --collectors.enabled "[defaults],process,container"
This enables the additional process and container collectors on top of the defaults.
### Using a configuration file
YAML configuration files can be specified with the `--config.file` flag. e.g. `.\windows_exporter.exe --config.file=config.yml`. If you are using the absolute path, make sure to quote the path, e.g. `.\windows_exporter.exe --config.file="C:\Program Files\windows_exporter\config.yml"`
It is also possible to load the configuration from a URL. e.g. `.\windows_exporter.exe --config.file="https://example.com/config.yml"`
If you need to skip TLS verification, you can use the `--config.file.insecure-skip-verify` flag. e.g. `.\windows_exporter.exe --config.file="https://example.com/config.yml" --config.file.insecure-skip-verify`
```yaml
collectors:
enabled: cpu,cs,net,service
collector:
service:
services-where: "Name='windows_exporter'"
log:
level: warn
```
An example configuration file can be found [here](docs/example_config.yml).
#### Configuration file notes
Configuration file values can be mixed with CLI flags. E.G.
`.\windows_exporter.exe --collectors.enabled=cpu,logon`
```yaml
log:
level: debug
```
CLI flags enjoy a higher priority over values specified in the configuration file.
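For example, combining the snippet above with a CLI flag (a hypothetical invocation): the collector list comes from the command line, while the log level still comes from the file:

```powershell
# --collectors.enabled overrides any `collectors: enabled:` value in config.yml,
# while log.level=debug from the file remains in effect.
.\windows_exporter.exe --config.file=config.yml --collectors.enabled=cpu,logon
```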
## License
Under [MIT](LICENSE)
[web_config]: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md

SECURITY.md

@@ -1,6 +0,0 @@
# Reporting a security issue
The Prometheus security policy, including how to report vulnerabilities, can be
found here:
https://prometheus.io/docs/operating/security/

78
appveyor.yml Normal file

@@ -0,0 +1,78 @@
version: "{build}"
os: Visual Studio 2017
build: off
stack: go 1.13
environment:
GOPATH: c:\gopath
GO111MODULE: on
clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter
install:
- mkdir %GOPATH%\bin
- set PATH=%GOPATH%\bin;%PATH%
- set PATH=%PATH%;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin
- choco install gitversion.portable make -y
- ps: |
appveyor DownloadFile https://github.com/golangci/golangci-lint/releases/download/v1.21.0/golangci-lint-1.21.0-windows-amd64.zip
Expand-Archive golangci-lint-1.21.0-windows-amd64.zip
Move-Item golangci-lint-1.21.0-windows-amd64\golangci-lint-1.21.0-windows-amd64\golangci-lint.exe $env:GOPATH\bin\golangci-lint.exe
- ps: |
$env:GO111MODULE="off"
go get -u github.com/prometheus/promu
$env:GO111MODULE="on"
test_script:
- make test
after_test:
- make lint
build_script:
- ps: |
# go mod download (or, if we don't call it, go build) will write every dependent package name to
# stderr, which will be interpreted as an error and abort the build if ErrorActionPreference is Stop,
# so we need to run it before setting the preference.
go mod download
$ErrorActionPreference = "Stop"
gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
$Version = Get-Content VERSION
make crossbuild
# GH requires all files to have different names, so add version/arch to differentiate
foreach($Arch in "amd64","386") {
Rename-Item output\$Arch\wmi_exporter.exe -NewName wmi_exporter-$Version-$Arch.exe
}
after_build:
- ps: |
# Build installer packages only on tagged releases
if($env:APPVEYOR_REPO_TAG -ne "True") {
return
}
$ErrorActionPreference = "Stop"
$BuildVersion = Get-Content VERSION
# The MSI version is not semver compliant, so just take the numerical parts
$MSIVersion = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
foreach($Arch in "amd64","386") {
Write-Verbose "Building wmi_exporter $MSIVersion msi for $Arch"
.\installer\build.ps1 -PathToExecutable .\output\$Arch\wmi_exporter-$BuildVersion-$Arch.exe -Version $MSIVersion -Arch "$Arch"
Move-Item installer\Output\wmi_exporter-$MSIVersion-$Arch.msi output\$Arch\
}
- promu checksum output\
artifacts:
- name: Artifacts
path: output\**\*
deploy:
- provider: GitHub
description: WMI Exporter version $(appveyor_build_version)
artifact: Artifacts
auth_token:
secure: 'CrXWeTf7qONUOEki5olFfGEUPMLDeHj61koDXV3OVEaLgtACmnVHsKUub9POflda'
draft: false
prerelease: false
on:
appveyor_repo_tag: true

collector/ad.go

@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package collector
@@ -6,16 +5,17 @@ package collector
import (
"errors"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("ad", NewADCollector)
}
// A ADCollector is a Prometheus collector for WMI Win32_PerfRawData_DirectoryServices_DirectoryServices metrics
type ADCollector struct {
logger log.Logger
AddressBookOperationsTotal *prometheus.Desc
AddressBookClientSessions *prometheus.Desc
ApproximateHighestDistinguishedNameTag *prometheus.Desc
@@ -79,12 +79,10 @@ type ADCollector struct {
TombstonedObjectsVisitedTotal *prometheus.Desc
}
// newADCollector ...
func newADCollector(logger log.Logger) (Collector, error) {
// NewADCollector ...
func NewADCollector() (Collector, error) {
const subsystem = "ad"
return &ADCollector{
logger: log.With(logger, "collector", subsystem),
AddressBookOperationsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "address_book_operations_total"),
"",
@@ -458,7 +456,7 @@ func newADCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *ADCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting ad metrics", "desc", desc, "err", err)
log.Error("failed collecting ad metrics:", desc, err)
return err
}
return nil
@@ -618,7 +616,7 @@ type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
func (c *ADCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_DirectoryServices_DirectoryServices
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
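The hunks above revolve around one recurring pattern: each collector registers a constructor under a name in `init()`, and its `Collect` method pushes metrics onto a channel. The exporter's own `Collector` interface, `ScrapeContext`, and `registerCollector` are not shown in this diff, so the following is a self-contained sketch of the same pattern with simplified stand-ins for those types:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Collector is a simplified stand-in for the exporter's collector interface:
// a collector pushes metrics onto a channel each time it is scraped.
type Collector interface {
	Collect(ch chan<- prometheus.Metric) error
}

// builders maps collector names to constructors; init() functions fill it,
// mirroring registerCollector("ad", NewADCollector) above.
var builders = map[string]func() (Collector, error){}

func registerCollector(name string, builder func() (Collector, error)) {
	builders[name] = builder
}

type demoCollector struct {
	sessions *prometheus.Desc
}

func init() {
	registerCollector("demo", func() (Collector, error) {
		return &demoCollector{
			sessions: prometheus.NewDesc("demo_sessions", "Example session gauge.", nil, nil),
		}, nil
	})
}

func (c *demoCollector) Collect(ch chan<- prometheus.Metric) error {
	// A real collector would query WMI or perflib here and report the result.
	ch <- prometheus.MustNewConstMetric(c.sessions, prometheus.GaugeValue, 1)
	return nil
}

func main() {
	col, err := builders["demo"]()
	if err != nil {
		panic(err)
	}
	ch := make(chan prometheus.Metric, 1)
	if err := col.Collect(ch); err != nil {
		panic(err)
	}
	fmt.Println((<-ch).Desc()) // prints the Desc of the one emitted metric
}
```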

collector/ad_test.go

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkADCollector(b *testing.B) {
benchmarkCollector(b, "ad", newADCollector)
}

collector/adcs.go

@@ -1,244 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"errors"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
type adcsCollector struct {
logger log.Logger
RequestsPerSecond *prometheus.Desc
RequestProcessingTime *prometheus.Desc
RetrievalsPerSecond *prometheus.Desc
RetrievalProcessingTime *prometheus.Desc
FailedRequestsPerSecond *prometheus.Desc
IssuedRequestsPerSecond *prometheus.Desc
PendingRequestsPerSecond *prometheus.Desc
RequestCryptographicSigningTime *prometheus.Desc
RequestPolicyModuleProcessingTime *prometheus.Desc
ChallengeResponsesPerSecond *prometheus.Desc
ChallengeResponseProcessingTime *prometheus.Desc
SignedCertificateTimestampListsPerSecond *prometheus.Desc
SignedCertificateTimestampListProcessingTime *prometheus.Desc
}
// ADCSCollectorMethod ...
func adcsCollectorMethod(logger log.Logger) (Collector, error) {
const subsystem = "adcs"
return &adcsCollector{
logger: log.With(logger, "collector", subsystem),
RequestsPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "requests_total"),
"Total certificate requests processed",
[]string{"cert_template"},
nil,
),
RequestProcessingTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "request_processing_time_seconds"),
"Last time elapsed for certificate requests",
[]string{"cert_template"},
nil,
),
RetrievalsPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "retrievals_total"),
"Total certificate retrieval requests processed",
[]string{"cert_template"},
nil,
),
RetrievalProcessingTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "retrievals_processing_time_seconds"),
"Last time elapsed for certificate retrieval request",
[]string{"cert_template"},
nil,
),
FailedRequestsPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failed_requests_total"),
"Total failed certificate requests processed",
[]string{"cert_template"},
nil,
),
IssuedRequestsPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "issued_requests_total"),
"Total issued certificate requests processed",
[]string{"cert_template"},
nil,
),
PendingRequestsPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pending_requests_total"),
"Total pending certificate requests processed",
[]string{"cert_template"},
nil,
),
RequestCryptographicSigningTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "request_cryptographic_signing_time_seconds"),
"Last time elapsed for signing operation request",
[]string{"cert_template"},
nil,
),
RequestPolicyModuleProcessingTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "request_policy_module_processing_time_seconds"),
"Last time elapsed for policy module processing request",
[]string{"cert_template"},
nil,
),
ChallengeResponsesPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "challenge_responses_total"),
"Total certificate challenge responses processed",
[]string{"cert_template"},
nil,
),
ChallengeResponseProcessingTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "challenge_response_processing_time_seconds"),
"Last time elapsed for challenge response",
[]string{"cert_template"},
nil,
),
SignedCertificateTimestampListsPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "signed_certificate_timestamp_lists_total"),
"Total Signed Certificate Timestamp Lists processed",
[]string{"cert_template"},
nil,
),
SignedCertificateTimestampListProcessingTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "signed_certificate_timestamp_list_processing_time_seconds"),
"Last time elapsed for Signed Certificate Timestamp List",
[]string{"cert_template"},
nil,
),
}, nil
}
func (c *adcsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectADCSCounters(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting ADCS metrics", "desc", desc, "err", err)
return err
}
return nil
}
type perflibADCS struct {
Name string
RequestsPerSecond float64 `perflib:"Requests/sec"`
RequestProcessingTime float64 `perflib:"Request processing time (ms)"`
RetrievalsPerSecond float64 `perflib:"Retrievals/sec"`
RetrievalProcessingTime float64 `perflib:"Retrieval processing time (ms)"`
FailedRequestsPerSecond float64 `perflib:"Failed Requests/sec"`
IssuedRequestsPerSecond float64 `perflib:"Issued Requests/sec"`
PendingRequestsPerSecond float64 `perflib:"Pending Requests/sec"`
RequestCryptographicSigningTime float64 `perflib:"Request cryptographic signing time (ms)"`
RequestPolicyModuleProcessingTime float64 `perflib:"Request policy module processing time (ms)"`
ChallengeResponsesPerSecond float64 `perflib:"Challenge Responses/sec"`
ChallengeResponseProcessingTime float64 `perflib:"Challenge Response processing time (ms)"`
SignedCertificateTimestampListsPerSecond float64 `perflib:"Signed Certificate Timestamp Lists/sec"`
SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"`
}
func (c *adcsCollector) collectADCSCounters(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibADCS, 0)
if _, ok := ctx.perfObjects["Certification Authority"]; !ok {
return nil, errors.New("Perflib did not contain an entry for Certification Authority")
}
err := unmarshalObject(ctx.perfObjects["Certification Authority"], &dst, c.logger)
if err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, errors.New("Perflib query for Certification Authority (ADCS) returned empty result set")
}
for _, d := range dst {
n := strings.ToLower(d.Name)
if n == "" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.RequestsPerSecond,
prometheus.CounterValue,
d.RequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RequestProcessingTime,
prometheus.GaugeValue,
milliSecToSec(d.RequestProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RetrievalsPerSecond,
prometheus.CounterValue,
d.RetrievalsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RetrievalProcessingTime,
prometheus.GaugeValue,
milliSecToSec(d.RetrievalProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailedRequestsPerSecond,
prometheus.CounterValue,
d.FailedRequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.IssuedRequestsPerSecond,
prometheus.CounterValue,
d.IssuedRequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PendingRequestsPerSecond,
prometheus.CounterValue,
d.PendingRequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RequestCryptographicSigningTime,
prometheus.GaugeValue,
milliSecToSec(d.RequestCryptographicSigningTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RequestPolicyModuleProcessingTime,
prometheus.GaugeValue,
milliSecToSec(d.RequestPolicyModuleProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ChallengeResponsesPerSecond,
prometheus.CounterValue,
d.ChallengeResponsesPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ChallengeResponseProcessingTime,
prometheus.GaugeValue,
milliSecToSec(d.ChallengeResponseProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.SignedCertificateTimestampListsPerSecond,
prometheus.CounterValue,
d.SignedCertificateTimestampListsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.SignedCertificateTimestampListProcessingTime,
prometheus.GaugeValue,
milliSecToSec(d.SignedCertificateTimestampListProcessingTime),
d.Name,
)
}
return nil, nil
}
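The `milliSecToSec` helper used throughout this file is defined elsewhere in the package and is not part of this diff; a plausible sketch of it, matching the `*_seconds` metric names above:

```go
// milliSecToSec converts a perflib counter reported in milliseconds to
// seconds. (Sketch only: the actual helper lives elsewhere in the
// collector package.)
func milliSecToSec(t float64) float64 {
	return t / 1000
}
```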

collector/adcs_test.go

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkADCSCollector(b *testing.B) {
benchmarkCollector(b, "adcs", adcsCollectorMethod)
}

collector/adfs.go

@@ -1,380 +1,120 @@
//go:build windows
// +build windows
package collector
import (
"math"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type adfsCollector struct {
logger log.Logger
func init() {
registerCollector("adfs", newADFSCollector)
}
adLoginConnectionFailures *prometheus.Desc
certificateAuthentications *prometheus.Desc
deviceAuthentications *prometheus.Desc
extranetAccountLockouts *prometheus.Desc
federatedAuthentications *prometheus.Desc
passportAuthentications *prometheus.Desc
passiveRequests *prometheus.Desc
passwordChangeFailed *prometheus.Desc
passwordChangeSucceeded *prometheus.Desc
tokenRequests *prometheus.Desc
windowsIntegratedAuthentications *prometheus.Desc
oAuthAuthZRequests *prometheus.Desc
oAuthClientAuthentications *prometheus.Desc
oAuthClientAuthenticationsFailures *prometheus.Desc
oAuthClientCredentialsRequestFailures *prometheus.Desc
oAuthClientCredentialsRequests *prometheus.Desc
oAuthClientPrivateKeyJwtAuthenticationFailures *prometheus.Desc
oAuthClientPrivateKeyJwtAuthentications *prometheus.Desc
oAuthClientSecretBasicAuthenticationFailures *prometheus.Desc
oAuthClientSecretBasicAuthentications *prometheus.Desc
oAuthClientSecretPostAuthenticationFailures *prometheus.Desc
oAuthClientSecretPostAuthentications *prometheus.Desc
oAuthClientWindowsIntegratedAuthenticationFailures *prometheus.Desc
oAuthClientWindowsIntegratedAuthentications *prometheus.Desc
oAuthLogonCertificateRequestFailures *prometheus.Desc
oAuthLogonCertificateTokenRequests *prometheus.Desc
oAuthPasswordGrantRequestFailures *prometheus.Desc
oAuthPasswordGrantRequests *prometheus.Desc
oAuthTokenRequests *prometheus.Desc
samlPTokenRequests *prometheus.Desc
ssoAuthenticationFailures *prometheus.Desc
ssoAuthentications *prometheus.Desc
wsfedTokenRequests *prometheus.Desc
wstrustTokenRequests *prometheus.Desc
upAuthenticationFailures *prometheus.Desc
upAuthentications *prometheus.Desc
externalAuthenticationFailures *prometheus.Desc
externalAuthentications *prometheus.Desc
artifactDBFailures *prometheus.Desc
avgArtifactDBQueryTime *prometheus.Desc
configDBFailures *prometheus.Desc
avgConfigDBQueryTime *prometheus.Desc
federationMetadataRequests *prometheus.Desc
type adfsCollector struct {
adLoginConnectionFailures *prometheus.Desc
certificateAuthentications *prometheus.Desc
deviceAuthentications *prometheus.Desc
extranetAccountLockouts *prometheus.Desc
federatedAuthentications *prometheus.Desc
passportAuthentications *prometheus.Desc
passiveRequests *prometheus.Desc
passwordChangeFailed *prometheus.Desc
passwordChangeSucceeded *prometheus.Desc
tokenRequests *prometheus.Desc
windowsIntegratedAuthentications *prometheus.Desc
}
// newADFSCollector constructs a new adfsCollector
func newADFSCollector(logger log.Logger) (Collector, error) {
func newADFSCollector() (Collector, error) {
const subsystem = "adfs"
return &adfsCollector{
logger: log.With(logger, "collector", subsystem),
adLoginConnectionFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "ad_login_connection_failures_total"),
prometheus.BuildFQName(Namespace, subsystem, "ad_login_connection_failures"),
"Total number of connection failures to an Active Directory domain controller",
nil,
nil,
),
certificateAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "certificate_authentications_total"),
prometheus.BuildFQName(Namespace, subsystem, "certificate_authentications"),
"Total number of User Certificate authentications",
nil,
nil,
),
deviceAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "device_authentications_total"),
prometheus.BuildFQName(Namespace, subsystem, "device_authentications"),
"Total number of Device authentications",
nil,
nil,
),
extranetAccountLockouts: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "extranet_account_lockouts_total"),
prometheus.BuildFQName(Namespace, subsystem, "extranet_account_lockouts"),
"Total number of Extranet Account Lockouts",
nil,
nil,
),
federatedAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "federated_authentications_total"),
prometheus.BuildFQName(Namespace, subsystem, "federated_authentications"),
"Total number of authentications from a federated source",
nil,
nil,
),
passportAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "passport_authentications_total"),
prometheus.BuildFQName(Namespace, subsystem, "passport_authentications"),
"Total number of Microsoft Passport SSO authentications",
nil,
nil,
),
passiveRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "passive_requests_total"),
prometheus.BuildFQName(Namespace, subsystem, "passive_requests"),
"Total number of passive (browser-based) requests",
nil,
nil,
),
passwordChangeFailed: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "password_change_failed_total"),
prometheus.BuildFQName(Namespace, subsystem, "password_change_failed"),
"Total number of failed password changes",
nil,
nil,
),
passwordChangeSucceeded: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "password_change_succeeded_total"),
prometheus.BuildFQName(Namespace, subsystem, "password_change_succeeded"),
"Total number of successful password changes",
nil,
nil,
),
tokenRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "token_requests_total"),
prometheus.BuildFQName(Namespace, subsystem, "token_requests"),
"Total number of token requests",
nil,
nil,
),
windowsIntegratedAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "windows_integrated_authentications_total"),
prometheus.BuildFQName(Namespace, subsystem, "windows_integrated_authentications"),
"Total number of Windows integrated authentications (Kerberos/NTLM)",
nil,
nil,
),
oAuthAuthZRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_authorization_requests_total"),
"Total number of incoming requests to the OAuth Authorization endpoint",
nil,
nil,
),
oAuthClientAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_authentication_success_total"),
"Total number of successful OAuth client Authentications",
nil,
nil,
),
oAuthClientAuthenticationsFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_authentication_failure_total"),
"Total number of failed OAuth client Authentications",
nil,
nil,
),
oAuthClientCredentialsRequestFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_credentials_failure_total"),
"Total number of failed OAuth Client Credentials Requests",
nil,
nil,
),
oAuthClientCredentialsRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_credentials_success_total"),
"Total number of successful RP tokens issued for OAuth Client Credentials Requests",
nil,
nil,
),
oAuthClientPrivateKeyJwtAuthenticationFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_privkey_jwt_authentication_failure_total"),
"Total number of failed OAuth Client Private Key Jwt Authentications",
nil,
nil,
),
oAuthClientPrivateKeyJwtAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_privkey_jwt_authentications_success_total"),
"Total number of successful OAuth Client Private Key Jwt Authentications",
nil,
nil,
),
oAuthClientSecretBasicAuthenticationFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_secret_basic_authentications_failure_total"),
"Total number of failed OAuth Client Secret Basic Authentications",
nil,
nil,
),
oAuthClientSecretBasicAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_secret_basic_authentications_success_total"),
"Total number of successful OAuth Client Secret Basic Authentications",
nil,
nil,
),
oAuthClientSecretPostAuthenticationFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_secret_post_authentications_failure_total"),
"Total number of failed OAuth Client Secret Post Authentications",
nil,
nil,
),
oAuthClientSecretPostAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_secret_post_authentications_success_total"),
"Total number of successful OAuth Client Secret Post Authentications",
nil,
nil,
),
oAuthClientWindowsIntegratedAuthenticationFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_windows_authentications_failure_total"),
"Total number of failed OAuth Client Windows Integrated Authentications",
nil,
nil,
),
oAuthClientWindowsIntegratedAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_windows_authentications_success_total"),
"Total number of successful OAuth Client Windows Integrated Authentications",
nil,
nil,
),
oAuthLogonCertificateRequestFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_logon_certificate_requests_failure_total"),
"Total number of failed OAuth Logon Certificate Requests",
nil,
nil,
),
oAuthLogonCertificateTokenRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_logon_certificate_token_requests_success_total"),
"Total number of successful RP tokens issued for OAuth Logon Certificate Requests",
nil,
nil,
),
oAuthPasswordGrantRequestFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_password_grant_requests_failure_total"),
"Total number of failed OAuth Password Grant Requests",
nil,
nil,
),
oAuthPasswordGrantRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_password_grant_requests_success_total"),
"Total number of successful OAuth Password Grant Requests",
nil,
nil,
),
oAuthTokenRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "oauth_token_requests_success_total"),
"Total number of successful RP tokens issued over OAuth protocol",
nil,
nil,
),
samlPTokenRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "samlp_token_requests_success_total"),
"Total number of successful RP tokens issued over SAML-P protocol",
nil,
nil,
),
ssoAuthenticationFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "sso_authentications_failure_total"),
"Total number of failed SSO authentications",
nil,
nil,
),
ssoAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "sso_authentications_success_total"),
"Total number of successful SSO authentications",
nil,
nil,
),
wsfedTokenRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "wsfed_token_requests_success_total"),
"Total number of successful RP tokens issued over WS-Fed protocol",
nil,
nil,
),
wstrustTokenRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "wstrust_token_requests_success_total"),
"Total number of successful RP tokens issued over WS-Trust protocol",
nil,
nil,
),
upAuthenticationFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "userpassword_authentications_failure_total"),
"Total number of failed AD U/P authentications",
nil,
nil,
),
upAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "userpassword_authentications_success_total"),
"Total number of successful AD U/P authentications",
nil,
nil,
),
externalAuthenticationFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "external_authentications_failure_total"),
"Total number of failed authentications from external MFA providers",
nil,
nil,
),
externalAuthentications: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "external_authentications_success_total"),
"Total number of successful authentications from external MFA providers",
nil,
nil,
),
artifactDBFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "db_artifact_failure_total"),
"Total number of failures connecting to the artifact database",
nil,
nil,
),
avgArtifactDBQueryTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "db_artifact_query_time_seconds_total"),
"Accumulator of time taken for an artifact database query",
nil,
nil,
),
configDBFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "db_config_failure_total"),
"Total number of failures connecting to the configuration database",
nil,
nil,
),
avgConfigDBQueryTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "db_config_query_time_seconds_total"),
"Accumulator of time taken for a configuration database query",
nil,
nil,
),
federationMetadataRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "federation_metadata_requests_total"),
"Total number of Federation Metadata requests",
nil,
nil,
),
}, nil
}
type perflibADFS struct {
AdLoginConnectionFailures float64 `perflib:"AD Login Connection Failures"`
CertificateAuthentications float64 `perflib:"Certificate Authentications"`
DeviceAuthentications float64 `perflib:"Device Authentications"`
ExtranetAccountLockouts float64 `perflib:"Extranet Account Lockouts"`
FederatedAuthentications float64 `perflib:"Federated Authentications"`
PassportAuthentications float64 `perflib:"Microsoft Passport Authentications"`
PassiveRequests float64 `perflib:"Passive Requests"`
PasswordChangeFailed float64 `perflib:"Password Change Failed Requests"`
PasswordChangeSucceeded float64 `perflib:"Password Change Successful Requests"`
TokenRequests float64 `perflib:"Token Requests"`
WindowsIntegratedAuthentications float64 `perflib:"Windows Integrated Authentications"`
OAuthAuthZRequests float64 `perflib:"OAuth AuthZ Requests"`
OAuthClientAuthentications float64 `perflib:"OAuth Client Authentications"`
OAuthClientAuthenticationFailures float64 `perflib:"OAuth Client Authentications Failures"`
OAuthClientCredentialRequestFailures float64 `perflib:"OAuth Client Credentials Request Failures"`
OAuthClientCredentialRequests float64 `perflib:"OAuth Client Credentials Requests"`
OAuthClientPrivKeyJWTAuthnFailures float64 `perflib:"OAuth Client Private Key Jwt Authentication Failures"`
OAuthClientPrivKeyJWTAuthentications float64 `perflib:"OAuth Client Private Key Jwt Authentications"`
OAuthClientBasicAuthnFailures float64 `perflib:"OAuth Client Secret Basic Authentication Failures"`
OAuthClientBasicAuthentications float64 `perflib:"OAuth Client Secret Basic Authentication Requests"`
OAuthClientSecretPostAuthnFailures float64 `perflib:"OAuth Client Secret Post Authentication Failures"`
OAuthClientSecretPostAuthentications float64 `perflib:"OAuth Client Secret Post Authentications"`
OAuthClientWindowsAuthnFailures float64 `perflib:"OAuth Client Windows Integrated Authentication Failures"`
OAuthClientWindowsAuthentications float64 `perflib:"OAuth Client Windows Integrated Authentications"`
OAuthLogonCertRequestFailures float64 `perflib:"OAuth Logon Certificate Request Failures"`
OAuthLogonCertTokenRequests float64 `perflib:"OAuth Logon Certificate Token Requests"`
OAuthPasswordGrantRequestFailures float64 `perflib:"OAuth Password Grant Request Failures"`
OAuthPasswordGrantRequests float64 `perflib:"OAuth Password Grant Requests"`
OAuthTokenRequests float64 `perflib:"OAuth Token Requests"`
SAMLPTokenRequests float64 `perflib:"SAML-P Token Requests"`
SSOAuthenticationFailures float64 `perflib:"SSO Authentication Failures"`
SSOAuthentications float64 `perflib:"SSO Authentications"`
WSFedTokenRequests float64 `perflib:"WS-Fed Token Requests"`
WSTrustTokenRequests float64 `perflib:"WS-Trust Token Requests"`
UsernamePasswordAuthnFailures float64 `perflib:"U/P Authentication Failures"`
UsernamePasswordAuthentications float64 `perflib:"U/P Authentications"`
ExternalAuthentications float64 `perflib:"External Authentications"`
ExternalAuthNFailures float64 `perflib:"External Authentication Failures"`
ArtifactDBFailures float64 `perflib:"Artifact Database Connection Failures"`
AvgArtifactDBQueryTime float64 `perflib:"Average Artifact Database Query Time"`
ConfigDBFailures float64 `perflib:"Configuration Database Connection Failures"`
AvgConfigDBQueryTime float64 `perflib:"Average Config Database Query Time"`
FederationMetadataRequests float64 `perflib:"Federation Metadata Requests"`
AdLoginConnectionFailures float64 `perflib:"AD login Connection Failures"`
CertificateAuthentications float64 `perflib:"Certificate Authentications"`
DeviceAuthentications float64 `perflib:"Device Authentications"`
ExtranetAccountLockouts float64 `perflib:"Extranet Account Lockouts"`
FederatedAuthentications float64 `perflib:"Federated Authentications"`
PassportAuthentications float64 `perflib:"Microsoft Passport Authentications"`
PassiveRequests float64 `perflib:"Passive Requests"`
PasswordChangeFailed float64 `perflib:"Password Change Failed Requests"`
PasswordChangeSucceeded float64 `perflib:"Password Change Successful Requests"`
TokenRequests float64 `perflib:"Token Requests"`
WindowsIntegratedAuthentications float64 `perflib:"Windows Integrated Authentications"`
}
func (c *adfsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var adfsData []perflibADFS
err := unmarshalObject(ctx.perfObjects["AD FS"], &adfsData, c.logger)
err := unmarshalObject(ctx.perfObjects["AD FS"], &adfsData)
if err != nil {
return err
}
@@ -444,197 +184,5 @@ func (c *adfsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric)
prometheus.CounterValue,
adfsData[0].WindowsIntegratedAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthAuthZRequests,
prometheus.CounterValue,
adfsData[0].OAuthAuthZRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientAuthenticationsFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientCredentialsRequestFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientCredentialRequestFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientCredentialsRequests,
prometheus.CounterValue,
adfsData[0].OAuthClientCredentialRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientPrivateKeyJwtAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientPrivKeyJWTAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientPrivateKeyJwtAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientPrivKeyJWTAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretBasicAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientBasicAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretBasicAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientBasicAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretPostAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientSecretPostAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretPostAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientSecretPostAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientWindowsIntegratedAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientWindowsAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientWindowsIntegratedAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientWindowsAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthLogonCertificateRequestFailures,
prometheus.CounterValue,
adfsData[0].OAuthLogonCertRequestFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthLogonCertificateTokenRequests,
prometheus.CounterValue,
adfsData[0].OAuthLogonCertTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthPasswordGrantRequestFailures,
prometheus.CounterValue,
adfsData[0].OAuthPasswordGrantRequestFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthPasswordGrantRequests,
prometheus.CounterValue,
adfsData[0].OAuthPasswordGrantRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthTokenRequests,
prometheus.CounterValue,
adfsData[0].OAuthTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.samlPTokenRequests,
prometheus.CounterValue,
adfsData[0].SAMLPTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.ssoAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].SSOAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.ssoAuthentications,
prometheus.CounterValue,
adfsData[0].SSOAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.wsfedTokenRequests,
prometheus.CounterValue,
adfsData[0].WSFedTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.wstrustTokenRequests,
prometheus.CounterValue,
adfsData[0].WSTrustTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.upAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].UsernamePasswordAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.upAuthentications,
prometheus.CounterValue,
adfsData[0].UsernamePasswordAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.externalAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].ExternalAuthNFailures,
)
ch <- prometheus.MustNewConstMetric(
c.externalAuthentications,
prometheus.CounterValue,
adfsData[0].ExternalAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.artifactDBFailures,
prometheus.CounterValue,
adfsData[0].ArtifactDBFailures,
)
ch <- prometheus.MustNewConstMetric(
c.avgArtifactDBQueryTime,
prometheus.CounterValue,
adfsData[0].AvgArtifactDBQueryTime*math.Pow(10, -8),
)
ch <- prometheus.MustNewConstMetric(
c.configDBFailures,
prometheus.CounterValue,
adfsData[0].ConfigDBFailures,
)
ch <- prometheus.MustNewConstMetric(
c.avgConfigDBQueryTime,
prometheus.CounterValue,
adfsData[0].AvgConfigDBQueryTime*math.Pow(10, -8),
)
ch <- prometheus.MustNewConstMetric(
c.federationMetadataRequests,
prometheus.CounterValue,
adfsData[0].FederationMetadataRequests,
)
return nil
}

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkADFSCollector(b *testing.B) {
benchmarkCollector(b, "adfs", newADFSCollector)
}

View File

@@ -1,454 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
// A CacheCollector is a Prometheus collector for Perflib Cache metrics
type CacheCollector struct {
logger log.Logger
AsyncCopyReadsTotal *prometheus.Desc
AsyncDataMapsTotal *prometheus.Desc
AsyncFastReadsTotal *prometheus.Desc
AsyncMDLReadsTotal *prometheus.Desc
AsyncPinReadsTotal *prometheus.Desc
CopyReadHitsTotal *prometheus.Desc
CopyReadsTotal *prometheus.Desc
DataFlushesTotal *prometheus.Desc
DataFlushPagesTotal *prometheus.Desc
DataMapHitsPercent *prometheus.Desc
DataMapPinsTotal *prometheus.Desc
DataMapsTotal *prometheus.Desc
DirtyPages *prometheus.Desc
DirtyPageThreshold *prometheus.Desc
FastReadNotPossiblesTotal *prometheus.Desc
FastReadResourceMissesTotal *prometheus.Desc
FastReadsTotal *prometheus.Desc
LazyWriteFlushesTotal *prometheus.Desc
LazyWritePagesTotal *prometheus.Desc
MDLReadHitsTotal *prometheus.Desc
MDLReadsTotal *prometheus.Desc
PinReadHitsTotal *prometheus.Desc
PinReadsTotal *prometheus.Desc
ReadAheadsTotal *prometheus.Desc
SyncCopyReadsTotal *prometheus.Desc
SyncDataMapsTotal *prometheus.Desc
SyncFastReadsTotal *prometheus.Desc
SyncMDLReadsTotal *prometheus.Desc
SyncPinReadsTotal *prometheus.Desc
}
// newCacheCollector constructs a new CacheCollector
func newCacheCollector(logger log.Logger) (Collector, error) {
const subsystem = "cache"
return &CacheCollector{
logger: log.With(logger, "collector", subsystem),
AsyncCopyReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "async_copy_reads_total"),
"(AsyncCopyReadsTotal)",
nil,
nil,
),
AsyncDataMapsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "async_data_maps_total"),
"(AsyncDataMapsTotal)",
nil,
nil,
),
AsyncFastReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "async_fast_reads_total"),
"(AsyncFastReadsTotal)",
nil,
nil,
),
AsyncMDLReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "async_mdl_reads_total"),
"(AsyncMDLReadsTotal)",
nil,
nil,
),
AsyncPinReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "async_pin_reads_total"),
"(AsyncPinReadsTotal)",
nil,
nil,
),
CopyReadHitsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "copy_read_hits_total"),
"(CopyReadHitsTotal)",
nil,
nil,
),
CopyReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "copy_reads_total"),
"(CopyReadsTotal)",
nil,
nil,
),
DataFlushesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "data_flushes_total"),
"(DataFlushesTotal)",
nil,
nil,
),
DataFlushPagesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "data_flush_pages_total"),
"(DataFlushPagesTotal)",
nil,
nil,
),
DataMapHitsPercent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "data_map_hits_percent"),
"(DataMapHitsPercent)",
nil,
nil,
),
DataMapPinsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "data_map_pins_total"),
"(DataMapPinsTotal)",
nil,
nil,
),
DataMapsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "data_maps_total"),
"(DataMapsTotal)",
nil,
nil,
),
DirtyPages: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "dirty_pages"),
"(DirtyPages)",
nil,
nil,
),
DirtyPageThreshold: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "dirty_page_threshold"),
"(DirtyPageThreshold)",
nil,
nil,
),
FastReadNotPossiblesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "fast_read_not_possibles_total"),
"(FastReadNotPossiblesTotal)",
nil,
nil,
),
FastReadResourceMissesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "fast_read_resource_misses_total"),
"(FastReadResourceMissesTotal)",
nil,
nil,
),
FastReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "fast_reads_total"),
"(FastReadsTotal)",
nil,
nil,
),
LazyWriteFlushesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "lazy_write_flushes_total"),
"(LazyWriteFlushesTotal)",
nil,
nil,
),
LazyWritePagesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "lazy_write_pages_total"),
"(LazyWritePagesTotal)",
nil,
nil,
),
MDLReadHitsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mdl_read_hits_total"),
"(MDLReadHitsTotal)",
nil,
nil,
),
MDLReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mdl_reads_total"),
"(MDLReadsTotal)",
nil,
nil,
),
PinReadHitsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pin_read_hits_total"),
"(PinReadHitsTotal)",
nil,
nil,
),
PinReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pin_reads_total"),
"(PinReadsTotal)",
nil,
nil,
),
ReadAheadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "read_aheads_total"),
"(ReadAheadsTotal)",
nil,
nil,
),
SyncCopyReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "sync_copy_reads_total"),
"(SyncCopyReadsTotal)",
nil,
nil,
),
SyncDataMapsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "sync_data_maps_total"),
"(SyncDataMapsTotal)",
nil,
nil,
),
SyncFastReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "sync_fast_reads_total"),
"(SyncFastReadsTotal)",
nil,
nil,
),
SyncMDLReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "sync_mdl_reads_total"),
"(SyncMDLReadsTotal)",
nil,
nil,
),
SyncPinReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "sync_pin_reads_total"),
"(SyncPinReadsTotal)",
nil,
nil,
),
}, nil
}
// Collect implements the Collector interface
func (c *CacheCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cache metrics", "desc", desc, "err", err)
return err
}
return nil
}
// Perflib "Cache":
// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
type perflibCache struct {
AsyncCopyReadsTotal float64 `perflib:"Async Copy Reads/sec"`
AsyncDataMapsTotal float64 `perflib:"Async Data Maps/sec"`
AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"`
AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"`
AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"`
CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"`
CopyReadsTotal float64 `perflib:"Copy Reads/sec"`
DataFlushesTotal float64 `perflib:"Data Flushes/sec"`
DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"`
DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"`
DataMapsTotal float64 `perflib:"Data Maps/sec"`
DirtyPages float64 `perflib:"Dirty Pages"`
DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"`
FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"`
FastReadsTotal float64 `perflib:"Fast Reads/sec"`
LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"`
LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"`
MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"`
MDLReadsTotal float64 `perflib:"MDL Reads/sec"`
PinReadHitsTotal float64 `perflib:"Pin Read Hits %"`
PinReadsTotal float64 `perflib:"Pin Reads/sec"`
ReadAheadsTotal float64 `perflib:"Read Aheads/sec"`
SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"`
SyncDataMapsTotal float64 `perflib:"Sync Data Maps/sec"`
SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"`
SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"`
SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"`
}
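The perflib struct tags above drive the unmarshalling: each tag names the raw counter whose value is copied into that field. A minimal sketch of the pattern, assuming the repo's unmarshalObject helper with the three-argument signature used by the newer code (the struct below is illustrative, not part of the source):
type perflibExampleCache struct {
	DirtyPages float64 `perflib:"Dirty Pages"`
}
var example []perflibExampleCache // a slice even for single-instance objects; entry 0 holds the data
if err := unmarshalObject(ctx.perfObjects["Cache"], &example, c.logger); err != nil {
	return nil, err
}
_ = example[0].DirtyPages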
func (c *CacheCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []perflibCache // Single-instance class; an array is required but will have a single entry.
if err := unmarshalObject(ctx.perfObjects["Cache"], &dst, c.logger); err != nil {
return nil, err
}
ch <- prometheus.MustNewConstMetric(
c.AsyncCopyReadsTotal,
prometheus.CounterValue,
dst[0].AsyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AsyncDataMapsTotal,
prometheus.CounterValue,
dst[0].AsyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AsyncFastReadsTotal,
prometheus.CounterValue,
dst[0].AsyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AsyncMDLReadsTotal,
prometheus.CounterValue,
dst[0].AsyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AsyncPinReadsTotal,
prometheus.CounterValue,
dst[0].AsyncPinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.CopyReadHitsTotal,
prometheus.GaugeValue,
dst[0].CopyReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.CopyReadsTotal,
prometheus.CounterValue,
dst[0].CopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DataFlushesTotal,
prometheus.CounterValue,
dst[0].DataFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DataFlushPagesTotal,
prometheus.CounterValue,
dst[0].DataFlushPagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DataMapHitsPercent,
prometheus.GaugeValue,
dst[0].DataMapHitsPercent,
)
ch <- prometheus.MustNewConstMetric(
c.DataMapPinsTotal,
prometheus.CounterValue,
dst[0].DataMapPinsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DataMapsTotal,
prometheus.CounterValue,
dst[0].DataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DirtyPages,
prometheus.GaugeValue,
dst[0].DirtyPages,
)
ch <- prometheus.MustNewConstMetric(
c.DirtyPageThreshold,
prometheus.GaugeValue,
dst[0].DirtyPageThreshold,
)
ch <- prometheus.MustNewConstMetric(
c.FastReadNotPossiblesTotal,
prometheus.CounterValue,
dst[0].FastReadNotPossiblesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FastReadResourceMissesTotal,
prometheus.CounterValue,
dst[0].FastReadResourceMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FastReadsTotal,
prometheus.CounterValue,
dst[0].FastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.LazyWriteFlushesTotal,
prometheus.CounterValue,
dst[0].LazyWriteFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.LazyWritePagesTotal,
prometheus.CounterValue,
dst[0].LazyWritePagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.MDLReadHitsTotal,
prometheus.CounterValue,
dst[0].MDLReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.MDLReadsTotal,
prometheus.CounterValue,
dst[0].MDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.PinReadHitsTotal,
prometheus.CounterValue,
dst[0].PinReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.PinReadsTotal,
prometheus.CounterValue,
dst[0].PinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ReadAheadsTotal,
prometheus.CounterValue,
dst[0].ReadAheadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncCopyReadsTotal,
prometheus.CounterValue,
dst[0].SyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncDataMapsTotal,
prometheus.CounterValue,
dst[0].SyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncFastReadsTotal,
prometheus.CounterValue,
dst[0].SyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncMDLReadsTotal,
prometheus.CounterValue,
dst[0].SyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncPinReadsTotal,
prometheus.CounterValue,
dst[0].SyncPinReadsTotal,
)
return nil, nil
}

View File

@@ -1,24 +1,19 @@
package collector
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/prometheus-community/windows_exporter/perflib"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/leoluk/perflib_exporter/perflib"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"golang.org/x/sys/windows/registry"
)
// ...
const (
// TODO: Make package-local
Namespace = "windows"
Namespace = "wmi"
// Conversion factors
ticksToSecondsScaleFactor = 1 / 1e7
@@ -27,35 +22,33 @@ const (
// getWindowsVersion reads the version number of the OS from the Registry
// See https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
func getWindowsVersion(logger log.Logger) float64 {
func getWindowsVersion() float64 {
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
if err != nil {
_ = level.Warn(logger).Log("msg", "Couldn't open registry", "err", err)
log.Warn("Couldn't open registry", err)
return 0
}
defer func() {
err = k.Close()
if err != nil {
_ = level.Warn(logger).Log("msg", "Failed to close registry key", "err", err)
log.Warnf("Failed to close registry key: %v", err)
}
}()
currentv, _, err := k.GetStringValue("CurrentVersion")
if err != nil {
_ = level.Warn(logger).Log("msg", "Couldn't open registry to determine current Windows version", "err", err)
log.Warn("Couldn't open registry to determine current Windows version:", err)
return 0
}
currentv_flt, err := strconv.ParseFloat(currentv, 64)
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Detected Windows version %f\n", currentv_flt))
log.Debugf("Detected Windows version %f\n", currentv_flt)
return currentv_flt
}
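For reference, CurrentVersion is a string such as "6.1" (Windows 7 / Server 2008 R2) or "6.3" (Windows 8.1 and later; newer releases keep reporting 6.3 for application compatibility), so the parsed float is what the 6.05 threshold in the cpu collector is compared against.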
type collectorBuilder func(log.Logger) (Collector, error)
type flagsBuilder func(*kingpin.Application)
type perfCounterNamesBuilder func(log.Logger) []string
type collectorBuilder func() (Collector, error)
var (
builders = make(map[string]collectorBuilder)
@@ -64,10 +57,6 @@ var (
func registerCollector(name string, builder collectorBuilder, perfCounterNames ...string) {
builders[name] = builder
addPerfCounterDependencies(name, perfCounterNames)
}
func addPerfCounterDependencies(name string, perfCounterNames []string) {
perfIndices := make([]string, 0, len(perfCounterNames))
for _, cn := range perfCounterNames {
perfIndices = append(perfIndices, MapCounterToIndex(cn))
@@ -82,12 +71,8 @@ func Available() []string {
}
return cs
}
func Build(collector string, logger log.Logger) (Collector, error) {
builder, exists := builders[collector]
if !exists {
return nil, fmt.Errorf("Unknown collector %q", collector)
}
return builder(logger)
func Build(collector string) (Collector, error) {
return builders[collector]()
}
func getPerfQuery(collectors []string) string {
parts := make([]string, 0, len(collectors))
@@ -119,41 +104,3 @@ func PrepareScrapeContext(collectors []string) (*ScrapeContext, error) {
return &ScrapeContext{objs}, nil
}
func boolToFloat(b bool) float64 {
if b {
return 1.0
}
return 0.0
}
func find(slice []string, val string) bool {
for _, item := range slice {
if item == val {
return true
}
}
return false
}
// Used by more complex collectors where user input specifies enabled child collectors.
// Splits the provided child-collector list and deduplicates it (see the usage sketch after this function).
func expandEnabledChildCollectors(enabled string) []string {
separated := strings.Split(enabled, ",")
unique := map[string]bool{}
for _, s := range separated {
if s != "" {
unique[s] = true
}
}
result := make([]string, 0, len(unique))
for s := range unique {
result = append(result, s)
}
// Ensure result is ordered, to prevent test failure
sort.Strings(result)
return result
}
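A quick sketch of the deduplication behaviour (the output is sorted, which keeps test results stable):
enabled := expandEnabledChildCollectors("connection,folder,folder,,volume")
// enabled == []string{"connection", "folder", "volume"}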
func milliSecToSec(t float64) float64 {
return t / 1000
}

View File

@@ -1,61 +0,0 @@
package collector
import (
"reflect"
"testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
func TestExpandChildCollectors(t *testing.T) {
cases := []struct {
name string
input string
expectedOutput []string
}{
{
name: "simple",
input: "testing1,testing2,testing3",
expectedOutput: []string{"testing1", "testing2", "testing3"},
},
{
name: "duplicate",
input: "testing1,testing2,testing2,testing3",
expectedOutput: []string{"testing1", "testing2", "testing3"},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
output := expandEnabledChildCollectors(c.input)
if !reflect.DeepEqual(output, c.expectedOutput) {
t.Errorf("Output mismatch, expected %+v, got %+v", c.expectedOutput, output)
}
})
}
}
func benchmarkCollector(b *testing.B, name string, collectFunc func(logger log.Logger) (Collector, error)) {
// Create perflib scrape context. Some perflib collectors require a correct context,
// or they will fail during the benchmark.
scrapeContext, err := PrepareScrapeContext([]string{name})
if err != nil {
b.Error(err)
}
c, err := collectFunc(log.NewNopLogger())
if err != nil {
b.Error(err)
}
metrics := make(chan prometheus.Metric)
go func() {
for {
<-metrics
}
}()
for i := 0; i < b.N; i++ {
c.Collect(scrapeContext, metrics) //nolint:errcheck
}
}
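These benchmarks run under the standard Go tool, e.g. go test -bench BenchmarkADFSCollector ./collector (package path assumed). The draining goroutine above exists because the metrics channel is unbuffered; without a reader, Collect would block.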

View File

@@ -1,22 +1,19 @@
//go:build windows
// +build windows
package collector
import (
"fmt"
"strings"
"github.com/Microsoft/hcsshim"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("container", NewContainerMetricsCollector)
}
// A ContainerMetricsCollector is a Prometheus collector for containers metrics
type ContainerMetricsCollector struct {
logger log.Logger
// Presence
ContainerAvailable *prometheus.Desc
@@ -39,20 +36,12 @@ type ContainerMetricsCollector struct {
PacketsSent *prometheus.Desc
DroppedPacketsIncoming *prometheus.Desc
DroppedPacketsOutgoing *prometheus.Desc
// Storage
ReadCountNormalized *prometheus.Desc
ReadSizeBytes *prometheus.Desc
WriteCountNormalized *prometheus.Desc
WriteSizeBytes *prometheus.Desc
}
// newContainerMetricsCollector constructs a new ContainerMetricsCollector
func newContainerMetricsCollector(logger log.Logger) (Collector, error) {
// NewContainerMetricsCollector constructs a new ContainerMetricsCollector
func NewContainerMetricsCollector() (Collector, error) {
const subsystem = "container"
return &ContainerMetricsCollector{
logger: log.With(logger, "collector", subsystem),
ContainerAvailable: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "available"),
"Available",
@@ -137,30 +126,6 @@ func newContainerMetricsCollector(logger log.Logger) (Collector, error) {
[]string{"container_id", "interface"},
nil,
),
ReadCountNormalized: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "storage_read_count_normalized_total"),
"Read Count Normalized",
[]string{"container_id"},
nil,
),
ReadSizeBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "storage_read_size_bytes_total"),
"Read Size Bytes",
[]string{"container_id"},
nil,
),
WriteCountNormalized: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "storage_write_count_normalized_total"),
"Write Count Normalized",
[]string{"container_id"},
nil,
),
WriteSizeBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "storage_write_size_bytes_total"),
"Write Size Bytes",
[]string{"container_id"},
nil,
),
}, nil
}
@@ -168,17 +133,17 @@ func newContainerMetricsCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting ContainerMetricsCollector metrics", "desc", desc, "err", err)
log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
return err
}
return nil
}
// containerClose closes the container resource
func (c *ContainerMetricsCollector) containerClose(container hcsshim.Container) {
err := container.Close()
func containerClose(c hcsshim.Container) {
err := c.Close()
if err != nil {
_ = level.Error(c.logger).Log("err", err)
log.Error(err)
}
}
@@ -187,7 +152,7 @@ func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prome
// Types Container is passed to get the containers compute systems only
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
if err != nil {
_ = level.Error(c.logger).Log("msg", "Err in Getting containers", "err", err)
log.Error("Err in Getting containers:", err)
return nil, err
}
@@ -202,171 +167,116 @@ func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prome
return nil, nil
}
containerPrefixes := make(map[string]string)
for _, containerDetails := range containers {
container, err := hcsshim.OpenContainer(containerDetails.ID)
containerId := containerDetails.ID
container, err := hcsshim.OpenContainer(containerId)
if container != nil {
defer c.containerClose(container)
defer containerClose(container)
}
if err != nil {
_ = level.Error(c.logger).Log("msg", "err in opening container", "containerId", containerDetails.ID, "err", err)
log.Error("err in opening container: ", containerId, err)
continue
}
cstats, err := container.Statistics()
if err != nil {
_ = level.Error(c.logger).Log("msg", "err in fetching container Statistics", "containerId", containerDetails.ID, "err", err)
log.Error("err in fetching container Statistics: ", containerId, err)
continue
}
containerIdWithPrefix := getContainerIdWithPrefix(containerDetails)
containerPrefixes[containerDetails.ID] = containerIdWithPrefix
// HCS v1 is used by the Docker runtime, so add the docker:// prefix to container_id
containerId = "docker://" + containerId
ch <- prometheus.MustNewConstMetric(
c.ContainerAvailable,
prometheus.CounterValue,
1,
containerIdWithPrefix,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.UsageCommitBytes,
prometheus.GaugeValue,
float64(cstats.Memory.UsageCommitBytes),
containerIdWithPrefix,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.UsageCommitPeakBytes,
prometheus.GaugeValue,
float64(cstats.Memory.UsageCommitPeakBytes),
containerIdWithPrefix,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.UsagePrivateWorkingSetBytes,
prometheus.GaugeValue,
float64(cstats.Memory.UsagePrivateWorkingSetBytes),
containerIdWithPrefix,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.RuntimeTotal,
prometheus.CounterValue,
float64(cstats.Processor.TotalRuntime100ns)*ticksToSecondsScaleFactor,
containerIdWithPrefix,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.RuntimeUser,
prometheus.CounterValue,
float64(cstats.Processor.RuntimeUser100ns)*ticksToSecondsScaleFactor,
containerIdWithPrefix,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.RuntimeKernel,
prometheus.CounterValue,
float64(cstats.Processor.RuntimeKernel100ns)*ticksToSecondsScaleFactor,
containerIdWithPrefix,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.ReadCountNormalized,
prometheus.CounterValue,
float64(cstats.Storage.ReadCountNormalized),
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.ReadSizeBytes,
prometheus.CounterValue,
float64(cstats.Storage.ReadSizeBytes),
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.WriteCountNormalized,
prometheus.CounterValue,
float64(cstats.Storage.WriteCountNormalized),
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.WriteSizeBytes,
prometheus.CounterValue,
float64(cstats.Storage.WriteSizeBytes),
containerIdWithPrefix,
)
}
hnsEndpoints, err := hcsshim.HNSListEndpointRequest()
if err != nil {
_ = level.Warn(c.logger).Log("msg", "Failed to collect network stats for containers")
return nil, nil
}
if len(hnsEndpoints) == 0 {
_ = level.Info(c.logger).Log("msg", "No network stats for containers to collect")
return nil, nil
}
for _, endpoint := range hnsEndpoints {
endpointStats, err := hcsshim.GetHNSEndpointStats(endpoint.Id)
if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Failed to collect network stats for interface %s", endpoint.Id), "err", err)
if len(cstats.Network) == 0 {
log.Info("No Network Stats for container: ", containerId)
continue
}
for _, containerId := range endpoint.SharedContainers {
containerIdWithPrefix, ok := containerPrefixes[containerId]
endpointId := strings.ToUpper(endpoint.Id)
if !ok {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Failed to collect network stats for container %s", containerId))
continue
}
networkStats := cstats.Network
for _, networkInterface := range networkStats {
ch <- prometheus.MustNewConstMetric(
c.BytesReceived,
prometheus.CounterValue,
float64(endpointStats.BytesReceived),
containerIdWithPrefix, endpointId,
float64(networkInterface.BytesReceived),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.BytesSent,
prometheus.CounterValue,
float64(endpointStats.BytesSent),
containerIdWithPrefix, endpointId,
float64(networkInterface.BytesSent),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsReceived,
prometheus.CounterValue,
float64(endpointStats.PacketsReceived),
containerIdWithPrefix, endpointId,
float64(networkInterface.PacketsReceived),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsSent,
prometheus.CounterValue,
float64(endpointStats.PacketsSent),
containerIdWithPrefix, endpointId,
float64(networkInterface.PacketsSent),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.DroppedPacketsIncoming,
prometheus.CounterValue,
float64(endpointStats.DroppedPacketsIncoming),
containerIdWithPrefix, endpointId,
float64(networkInterface.DroppedPacketsIncoming),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.DroppedPacketsOutgoing,
prometheus.CounterValue,
float64(endpointStats.DroppedPacketsOutgoing),
containerIdWithPrefix, endpointId,
float64(networkInterface.DroppedPacketsOutgoing),
containerId, networkInterface.EndpointId,
)
break
}
}
return nil, nil
}
func getContainerIdWithPrefix(containerDetails hcsshim.ContainerProperties) string {
switch containerDetails.Owner {
case "containerd-shim-runhcs-v1.exe":
return "containerd://" + containerDetails.ID
default:
// default to docker or if owner is not set
return "docker://" + containerDetails.ID
}
}
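A quick sketch of the resulting IDs for the two runtimes handled above:
getContainerIdWithPrefix(hcsshim.ContainerProperties{ID: "abc123", Owner: "containerd-shim-runhcs-v1.exe"}) // "containerd://abc123"
getContainerIdWithPrefix(hcsshim.ContainerProperties{ID: "abc123", Owner: "docker"})                        // "docker://abc123" (also the fallback when Owner is unset)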

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkContainerCollector(b *testing.B) {
benchmarkCollector(b, "container", newContainerMetricsCollector)
}

View File

@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package collector
@@ -6,21 +5,27 @@ package collector
import (
"strings"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type cpuCollectorBasic struct {
logger log.Logger
func init() {
var deps string
// See below for 6.05 magic value
if getWindowsVersion() > 6.05 {
deps = "Processor Information"
} else {
deps = "Processor"
}
registerCollector("cpu", newCPUCollector, deps)
}
type cpuCollectorBasic struct {
CStateSecondsTotal *prometheus.Desc
TimeTotal *prometheus.Desc
InterruptsTotal *prometheus.Desc
DPCsTotal *prometheus.Desc
}
type cpuCollectorFull struct {
logger log.Logger
CStateSecondsTotal *prometheus.Desc
TimeTotal *prometheus.Desc
InterruptsTotal *prometheus.Desc
@@ -31,18 +36,13 @@ type cpuCollectorFull struct {
ProcessorFrequencyMHz *prometheus.Desc
ProcessorMaxFrequencyMHz *prometheus.Desc
ProcessorPerformance *prometheus.Desc
ProcessorMPerf *prometheus.Desc
ProcessorRTC *prometheus.Desc
ProcessorUtility *prometheus.Desc
ProcessorPrivUtility *prometheus.Desc
}
// newCPUCollector constructs a new cpuCollector, appropriate for the running OS
func newCPUCollector(logger log.Logger) (Collector, error) {
func newCPUCollector() (Collector, error) {
const subsystem = "cpu"
logger = log.With(logger, "collector", subsystem)
version := getWindowsVersion(logger)
version := getWindowsVersion()
// For Windows 2008 (version 6.0) or earlier we only have the "Processor"
// class. As of Windows 2008 R2 (version 6.1) the more detailed
// "Processor Information" set is available (although some of the counters
@@ -51,7 +51,6 @@ func newCPUCollector(logger log.Logger) (Collector, error) {
// Value 6.05 was selected to split between Windows versions.
if version < 6.05 {
return &cpuCollectorBasic{
logger: logger,
CStateSecondsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
"Time spent in low-power idle state",
@@ -60,7 +59,7 @@ func newCPUCollector(logger log.Logger) (Collector, error) {
),
TimeTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
"Time that processor spent in different modes (dpc, idle, interrupt, privileged, user)",
"Time that processor spent in different modes (idle, user, system, ...)",
[]string{"core", "mode"},
nil,
),
@@ -80,7 +79,6 @@ func newCPUCollector(logger log.Logger) (Collector, error) {
}
return &cpuCollectorFull{
logger: logger,
CStateSecondsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
"Time spent in low-power idle state",
@@ -89,7 +87,7 @@ func newCPUCollector(logger log.Logger) (Collector, error) {
),
TimeTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
"Time that processor spent in different modes (dpc, idle, interrupt, privileged, user)",
"Time that processor spent in different modes (idle, user, system, ...)",
[]string{"core", "mode"},
nil,
),
@@ -130,35 +128,11 @@ func newCPUCollector(logger log.Logger) (Collector, error) {
nil,
),
ProcessorPerformance: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "processor_performance_total"),
prometheus.BuildFQName(Namespace, subsystem, "processor_performance"),
"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
[]string{"core"},
nil,
),
ProcessorMPerf: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "processor_mperf_total"),
"Processor MPerf is the number of TSC ticks incremented while executing instructions",
[]string{"core"},
nil,
),
ProcessorRTC: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "processor_rtc_total"),
"Processor RTC represents the number of RTC ticks made since the system booted. It should consistently be 64e6, and can be used to properly derive Processor Utility Rate",
[]string{"core"},
nil,
),
ProcessorUtility: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "processor_utility_total"),
"Processor Utility represents is the amount of time the core spends executing instructions",
[]string{"core"},
nil,
),
ProcessorPrivUtility: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "processor_privileged_utility_total"),
"Processor Privilieged Utility represents is the amount of time the core has spent executing instructions inside the kernel",
[]string{"core"},
nil,
),
}, nil
}
@@ -183,7 +157,7 @@ type perflibProcessor struct {
func (c *cpuCollectorBasic) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
data := make([]perflibProcessor, 0)
err := unmarshalObject(ctx.perfObjects["Processor"], &data, c.logger)
err := unmarshalObject(ctx.perfObjects["Processor"], &data)
if err != nil {
return err
}
@@ -283,16 +257,14 @@ type perflibProcessorInformation struct {
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
ProcessorPerformance float64 `perflib:"% Processor Performance"`
ProcessorMPerf float64 `perflib:"% Processor Performance,secondvalue"`
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
ProcessorRTC float64 `perflib:"% Processor Utility,secondvalue"`
UserTimeSeconds float64 `perflib:"% User Time"`
}
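Note the ",secondvalue" tag option used twice above. A minimal sketch of the two-field pattern, assuming the repo's perflib unmarshaller exposes a counter's secondary raw value through that option:
type perflibExample struct {
	ProcessorPerformance float64 `perflib:"% Processor Performance"`             // primary raw value
	ProcessorMPerf       float64 `perflib:"% Processor Performance,secondvalue"` // secondary raw value of the same counter
}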
func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
data := make([]perflibProcessorInformation, 0)
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data, c.logger)
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data)
if err != nil {
return err
}
@@ -393,34 +365,10 @@ func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorPerformance,
prometheus.CounterValue,
prometheus.GaugeValue,
cpu.ProcessorPerformance,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorMPerf,
prometheus.CounterValue,
cpu.ProcessorMPerf,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorRTC,
prometheus.CounterValue,
cpu.ProcessorRTC,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorUtility,
prometheus.CounterValue,
cpu.ProcessorUtilityRate,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorPrivUtility,
prometheus.CounterValue,
cpu.PrivilegedUtilitySeconds,
core,
)
}
return nil

View File

@@ -1,99 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"errors"
"strconv"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// If you are adding additional labels to the metric, make sure that they get added in here as well. See below for explanation.
const (
win32ProcessorQuery = "SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name FROM Win32_Processor"
)
// A CpuInfoCollector is a Prometheus collector for a few WMI metrics in Win32_Processor
type CpuInfoCollector struct {
logger log.Logger
CpuInfo *prometheus.Desc
}
func newCpuInfoCollector(logger log.Logger) (Collector, error) {
const subsystem = "cpu_info"
return &CpuInfoCollector{
logger: log.With(logger, "collector", subsystem),
CpuInfo: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "", subsystem),
"Labeled CPU information as provided provided by Win32_Processor",
[]string{
"architecture",
"device_id",
"description",
"family",
"l2_cache_size",
"l3_cache_size",
"name"},
nil,
),
}, nil
}
type win32_Processor struct {
Architecture uint32
DeviceID string
Description string
Family uint16
L2CacheSize uint32
L3CacheSize uint32
Name string
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *CpuInfoCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cpu_info metrics", "desc", desc, "err", err)
return err
}
return nil
}
func (c *CpuInfoCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_Processor
// We use a static query here because the provided methods in wmi.go all issue a SELECT *;
// this results in the time-consuming LoadPercentage field being read, which seems to measure each CPU
// serially over a 1-second interval, so the scrape time is at least 1s * num_sockets.
if err := wmi.Query(win32ProcessorQuery, &dst); err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
// Some CPUs end up exposing trailing spaces for certain strings, so clean them up
for _, processor := range dst {
ch <- prometheus.MustNewConstMetric(
c.CpuInfo,
prometheus.GaugeValue,
1.0,
strconv.Itoa(int(processor.Architecture)),
strings.TrimRight(processor.DeviceID, " "),
strings.TrimRight(processor.Description, " "),
strconv.Itoa(int(processor.Family)),
strconv.Itoa(int(processor.L2CacheSize)),
strconv.Itoa(int(processor.L3CacheSize)),
strings.TrimRight(processor.Name, " "),
)
}
return nil, nil
}

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkCPUCollector(b *testing.B) {
benchmarkCollector(b, "cpu", newCPUCollector)
}

View File

@@ -1,31 +1,31 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/headers/sysinfoapi"
"errors"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("cs", NewCSCollector)
}
// A CSCollector is a Prometheus collector for WMI metrics
type CSCollector struct {
logger log.Logger
PhysicalMemoryBytes *prometheus.Desc
LogicalProcessors *prometheus.Desc
Hostname *prometheus.Desc
}
// newCSCollector ...
func newCSCollector(logger log.Logger) (Collector, error) {
// NewCSCollector ...
func NewCSCollector() (Collector, error) {
const subsystem = "cs"
return &CSCollector{
logger: log.With(logger, "collector", subsystem),
LogicalProcessors: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "logical_processors"),
"ComputerSystem.NumberOfLogicalProcessors",
@@ -54,53 +54,57 @@ func newCSCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cs metrics", "desc", desc, "err", err)
log.Error("failed collecting cs metrics:", desc, err)
return err
}
return nil
}
func (c *CSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
// Get systeminfo for number of processors
systemInfo := sysinfoapi.GetSystemInfo()
// Win32_ComputerSystem docs:
// - https://msdn.microsoft.com/en-us/library/aa394102
type Win32_ComputerSystem struct {
NumberOfLogicalProcessors uint32
TotalPhysicalMemory uint64
DNSHostname string
Domain string
Workgroup *string
}
// Get memory status for physical memory
mem, err := sysinfoapi.GlobalMemoryStatusEx()
if err != nil {
func (c *CSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_ComputerSystem
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.LogicalProcessors,
prometheus.GaugeValue,
float64(systemInfo.NumberOfProcessors),
float64(dst[0].NumberOfLogicalProcessors),
)
ch <- prometheus.MustNewConstMetric(
c.PhysicalMemoryBytes,
prometheus.GaugeValue,
float64(mem.TotalPhys),
float64(dst[0].TotalPhysicalMemory),
)
hostname, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSHostname)
if err != nil {
return nil, err
}
domain, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSDomain)
if err != nil {
return nil, err
}
fqdn, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSFullyQualified)
if err != nil {
return nil, err
var fqdn string
if dst[0].Workgroup == nil || dst[0].Domain != *dst[0].Workgroup {
fqdn = dst[0].DNSHostname + "." + dst[0].Domain
} else {
fqdn = dst[0].DNSHostname
}
ch <- prometheus.MustNewConstMetric(
c.Hostname,
prometheus.GaugeValue,
1.0,
hostname,
domain,
dst[0].DNSHostname,
dst[0].Domain,
fqdn,
)

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkCsCollector(b *testing.B) {
benchmarkCollector(b, "cs", newCSCollector)
}

View File

@@ -1,815 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
const (
FlagDfsrEnabledCollectors = "collectors.dfsr.sources-enabled"
)
var dfsrEnabledCollectors *string
// DFSRCollector contains the metric and state data of the DFSR collectors.
type DFSRCollector struct {
logger log.Logger
// Connection source
ConnectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
ConnectionBytesReceivedTotal *prometheus.Desc
ConnectionCompressedSizeOfFilesReceivedTotal *prometheus.Desc
ConnectionFilesReceivedTotal *prometheus.Desc
ConnectionRDCBytesReceivedTotal *prometheus.Desc
ConnectionRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
ConnectionRDCSizeOfFilesReceivedTotal *prometheus.Desc
ConnectionRDCNumberofFilesReceivedTotal *prometheus.Desc
ConnectionSizeOfFilesReceivedTotal *prometheus.Desc
// Folder source
FolderBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
FolderCompressedSizeOfFilesReceivedTotal *prometheus.Desc
FolderConflictBytesCleanedupTotal *prometheus.Desc
FolderConflictBytesGeneratedTotal *prometheus.Desc
FolderConflictFilesCleanedUpTotal *prometheus.Desc
FolderConflictFilesGeneratedTotal *prometheus.Desc
FolderConflictFolderCleanupsCompletedTotal *prometheus.Desc
FolderConflictSpaceInUse *prometheus.Desc
FolderDeletedSpaceInUse *prometheus.Desc
FolderDeletedBytesCleanedUpTotal *prometheus.Desc
FolderDeletedBytesGeneratedTotal *prometheus.Desc
FolderDeletedFilesCleanedUpTotal *prometheus.Desc
FolderDeletedFilesGeneratedTotal *prometheus.Desc
FolderFileInstallsRetriedTotal *prometheus.Desc
FolderFileInstallsSucceededTotal *prometheus.Desc
FolderFilesReceivedTotal *prometheus.Desc
FolderRDCBytesReceivedTotal *prometheus.Desc
FolderRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
FolderRDCNumberofFilesReceivedTotal *prometheus.Desc
FolderRDCSizeOfFilesReceivedTotal *prometheus.Desc
FolderSizeOfFilesReceivedTotal *prometheus.Desc
FolderStagingSpaceInUse *prometheus.Desc
FolderStagingBytesCleanedUpTotal *prometheus.Desc
FolderStagingBytesGeneratedTotal *prometheus.Desc
FolderStagingFilesCleanedUpTotal *prometheus.Desc
FolderStagingFilesGeneratedTotal *prometheus.Desc
FolderUpdatesDroppedTotal *prometheus.Desc
// Volume source
VolumeDatabaseLookupsTotal *prometheus.Desc
VolumeDatabaseCommitsTotal *prometheus.Desc
VolumeUSNJournalUnreadPercentage *prometheus.Desc
VolumeUSNJournalRecordsAcceptedTotal *prometheus.Desc
VolumeUSNJournalRecordsReadTotal *prometheus.Desc
// Child collector functions used during collection
dfsrChildCollectors []dfsrCollectorFunc
}
type dfsrCollectorFunc func(ctx *ScrapeContext, ch chan<- prometheus.Metric) error
// Map DFSR collector names to their Perflib object names,
// e.g. volume -> DFS Replication Service Volumes
func dfsrGetPerfObjectName(collector string) string {
prefix := "DFS "
suffix := ""
switch collector {
case "connection":
suffix = "Replication Connections"
case "folder":
suffix = "Replicated Folders"
case "volume":
suffix = "Replication Service Volumes"
}
return prefix + suffix
}
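A quick check of the mapping implemented above:
dfsrGetPerfObjectName("connection") // "DFS Replication Connections"
dfsrGetPerfObjectName("folder")     // "DFS Replicated Folders"
dfsrGetPerfObjectName("volume")     // "DFS Replication Service Volumes"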
// newDFSRCollectorFlags registers the collector's command-line flags
func newDFSRCollectorFlags(app *kingpin.Application) {
dfsrEnabledCollectors = app.Flag(FlagDfsrEnabledCollectors, "Comma-separated list of DFSR Perflib sources to use.").Default("connection,folder,volume").String()
}
// newDFSRCollector constructs a new DFSRCollector
func newDFSRCollector(logger log.Logger) (Collector, error) {
const subsystem = "dfsr"
logger = log.With(logger, "collector", subsystem)
_ = level.Info(logger).Log("msg", "dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
enabled := expandEnabledChildCollectors(*dfsrEnabledCollectors)
perfCounters := make([]string, 0, len(enabled))
for _, c := range enabled {
perfCounters = append(perfCounters, dfsrGetPerfObjectName(c))
}
addPerfCounterDependencies(subsystem, perfCounters)
dfsrCollector := DFSRCollector{
logger: logger,
// Connection
ConnectionBandwidthSavingsUsingDFSReplicationTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
"Total bytes of bandwidth saved using DFS Replication for this connection",
[]string{"name"},
nil,
),
ConnectionBytesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_bytes_received_total"),
"Total bytes received for connection",
[]string{"name"},
nil,
),
ConnectionCompressedSizeOfFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_compressed_size_of_files_received_bytes_total"),
"Total compressed size of files received on the connection, in bytes",
[]string{"name"},
nil,
),
ConnectionFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_received_files_total"),
"Total number of files received for connection",
[]string{"name"},
nil,
),
ConnectionRDCBytesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_rdc_received_bytes_total"),
"Total bytes received on the connection while replicating files using Remote Differential Compression",
[]string{"name"},
nil,
),
ConnectionRDCCompressedSizeOfFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_rdc_compressed_size_of_received_files_bytes_total"),
"Total uncompressed size of files received with Remote Differential Compression for connection",
[]string{"name"},
nil,
),
ConnectionRDCNumberofFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_rdc_received_files_total"),
"Total number of files received using remote differential compression",
[]string{"name"},
nil,
),
ConnectionRDCSizeOfFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_rdc_size_of_received_files_bytes_total"),
"Total size of received Remote Differential Compression files, in bytes.",
[]string{"name"},
nil,
),
ConnectionSizeOfFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_files_received_bytes_total"),
"Total size of files received, in bytes",
[]string{"name"},
nil,
),
// Folder
FolderBandwidthSavingsUsingDFSReplicationTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_bandwidth_savings_using_dfs_replication_bytes_total"),
"Total bytes of bandwidth saved using DFS Replication for this folder",
[]string{"name"},
nil,
),
FolderCompressedSizeOfFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_compressed_size_of_received_files_bytes_total"),
"Total compressed size of files received on the folder, in bytes",
[]string{"name"},
nil,
),
FolderConflictBytesCleanedupTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_cleaned_up_bytes_total"),
"Total size of conflict loser files and folders deleted from the Conflict and Deleted folder, in bytes",
[]string{"name"},
nil,
),
FolderConflictBytesGeneratedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_generated_bytes_total"),
"Total size of conflict loser files and folders moved to the Conflict and Deleted folder, in bytes",
[]string{"name"},
nil,
),
FolderConflictFilesCleanedUpTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_cleaned_up_files_total"),
"Number of conflict loser files deleted from the Conflict and Deleted folder",
[]string{"name"},
nil,
),
FolderConflictFilesGeneratedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_generated_files_total"),
"Number of files and folders moved to the Conflict and Deleted folder",
[]string{"name"},
nil,
),
FolderConflictFolderCleanupsCompletedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_folder_cleanups_total"),
"Number of deletions of conflict loser files and folders in the Conflict and Deleted",
[]string{"name"},
nil,
),
FolderConflictSpaceInUse: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_space_in_use_bytes"),
"Total size of the conflict loser files and folders currently in the Conflict and Deleted folder",
[]string{"name"},
nil,
),
FolderDeletedSpaceInUse: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_space_in_use_bytes"),
"Total size (in bytes) of the deleted files and folders currently in the Conflict and Deleted folder",
[]string{"name"},
nil,
),
FolderDeletedBytesCleanedUpTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_cleaned_up_bytes_total"),
"Total size (in bytes) of replicating deleted files and folders that were cleaned up from the Conflict and Deleted folder",
[]string{"name"},
nil,
),
FolderDeletedBytesGeneratedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_generated_bytes_total"),
"Total size (in bytes) of replicated deleted files and folders that were moved to the Conflict and Deleted folder after they were deleted from a replicated folder on a sending member",
[]string{"name"},
nil,
),
FolderDeletedFilesCleanedUpTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_cleaned_up_files_total"),
"Number of files and folders that were cleaned up from the Conflict and Deleted folder",
[]string{"name"},
nil,
),
FolderDeletedFilesGeneratedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_generated_files_total"),
"Number of deleted files and folders that were moved to the Conflict and Deleted folder",
[]string{"name"},
nil,
),
FolderFileInstallsRetriedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_file_installs_retried_total"),
"Total number of file installs that are being retried due to sharing violations or other errors encountered when installing the files",
[]string{"name"},
nil,
),
FolderFileInstallsSucceededTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_file_installs_succeeded_total"),
"Total number of files that were successfully received from sending members and installed locally on this server",
[]string{"name"},
nil,
),
FolderFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_received_files_total"),
"Total number of files received",
[]string{"name"},
nil,
),
FolderRDCBytesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_rdc_received_bytes_total"),
"Total number of bytes received in replicating files using Remote Differential Compression",
[]string{"name"},
nil,
),
FolderRDCCompressedSizeOfFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_rdc_compressed_size_of_received_files_bytes_total"),
"Total compressed size (in bytes) of the files received with Remote Differential Compression",
[]string{"name"},
nil,
),
FolderRDCNumberofFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_rdc_received_files_total"),
"Total number of files received with Remote Differential Compression",
[]string{"name"},
nil,
),
FolderRDCSizeOfFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_rdc_files_received_bytes_total"),
"Total uncompressed size (in bytes) of the files received with Remote Differential Compression",
[]string{"name"},
nil,
),
FolderSizeOfFilesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_files_received_bytes_total"),
"Total uncompressed size (in bytes) of the files received",
[]string{"name"},
nil,
),
FolderStagingSpaceInUse: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_space_in_use_bytes"),
"Total size of files and folders currently in the staging folder.",
[]string{"name"},
nil,
),
FolderStagingBytesCleanedUpTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_cleaned_up_bytes_total"),
"Total size (in bytes) of the files and folders that have been cleaned up from the staging folder",
[]string{"name"},
nil,
),
FolderStagingBytesGeneratedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_generated_bytes_total"),
"Total size (in bytes) of replicated files and folders in the staging folder created by the DFS Replication service since last restart",
[]string{"name"},
nil,
),
FolderStagingFilesCleanedUpTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_cleaned_up_files_total"),
"Total number of files and folders that have been cleaned up from the staging folder",
[]string{"name"},
nil,
),
FolderStagingFilesGeneratedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_generated_files_total"),
"Total number of times replicated files and folders have been staged by the DFS Replication service",
[]string{"name"},
nil,
),
FolderUpdatesDroppedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "folder_dropped_updates_total"),
"Total number of redundant file replication update records that have been ignored by the DFS Replication service because they did not change the replicated file or folder",
[]string{"name"},
nil,
),
// Volume
VolumeDatabaseCommitsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "volume_database_commits_total"),
"Total number of DFSR Volume database commits",
[]string{"name"},
nil,
),
VolumeDatabaseLookupsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "volume_database_lookups_total"),
"Total number of DFSR Volume database lookups",
[]string{"name"},
nil,
),
VolumeUSNJournalUnreadPercentage: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "volume_usn_journal_unread_percentage"),
"Percentage of DFSR Volume USN journal records that are unread",
[]string{"name"},
nil,
),
VolumeUSNJournalRecordsAcceptedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "volume_usn_journal_accepted_records_total"),
"Total number of USN journal records accepted",
[]string{"name"},
nil,
),
VolumeUSNJournalRecordsReadTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "volume_usn_journal_read_records_total"),
"Total number of DFSR Volume USN journal records read",
[]string{"name"},
nil,
),
}
dfsrCollector.dfsrChildCollectors = dfsrCollector.getDFSRChildCollectors(enabled)
return &dfsrCollector, nil
}
// Maps enabled child collector names to their relevant collection function,
// for use in DFSRCollector.Collect()
func (c *DFSRCollector) getDFSRChildCollectors(enabledCollectors []string) []dfsrCollectorFunc {
var dfsrCollectors []dfsrCollectorFunc
for _, collector := range enabledCollectors {
switch collector {
case "connection":
dfsrCollectors = append(dfsrCollectors, c.collectConnection)
case "folder":
dfsrCollectors = append(dfsrCollectors, c.collectFolder)
case "volume":
dfsrCollectors = append(dfsrCollectors, c.collectVolume)
}
}
return dfsrCollectors
}
// Collect implements the Collector interface.
// Sends metric values for each metric to the provided prometheus Metric channel.
func (c *DFSRCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
for _, fn := range c.dfsrChildCollectors {
err := fn(ctx, ch)
if err != nil {
return err
}
}
return nil
}
// Perflib: "DFS Replication Service Connections"
type PerflibDFSRConnection struct {
Name string
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
BytesReceivedTotal float64 `perflib:"Total Bytes Received"`
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
FilesReceivedTotal float64 `perflib:"Total Files Received"`
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
RDCNumberofFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
}
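// A minimal sketch of how a tag-driven unmarshal in the spirit of unmarshalObject
// can work (illustrative only -- the real helper lives elsewhere in this package
// and also handles perflib's raw counter layout). It assumes counter values have
// already been flattened into a map per instance, and needs the "reflect" import.
func examplePerflibUnmarshal(counters map[string]float64, out interface{}) {
v := reflect.ValueOf(out).Elem() // out must be a pointer to a struct
t := v.Type()
for i := 0; i < t.NumField(); i++ {
tag := t.Field(i).Tag.Get("perflib")
if tag == "" {
continue // untagged fields (e.g. Name) are populated by other means
}
if val, ok := counters[tag]; ok && v.Field(i).CanSet() {
v.Field(i).SetFloat(val) // every tagged field in these structs is a float64
}
}
}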
func (c *DFSRCollector) collectConnection(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRConnection
if err := unmarshalObject(ctx.perfObjects["DFS Replication Connections"], &dst, c.logger); err != nil {
return err
}
for _, connection := range dst {
ch <- prometheus.MustNewConstMetric(
c.ConnectionBandwidthSavingsUsingDFSReplicationTotal,
prometheus.CounterValue,
connection.BandwidthSavingsUsingDFSReplicationTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionBytesReceivedTotal,
prometheus.CounterValue,
connection.BytesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.CompressedSizeOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionFilesReceivedTotal,
prometheus.CounterValue,
connection.FilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionRDCBytesReceivedTotal,
prometheus.CounterValue,
connection.RDCBytesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionRDCCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.RDCCompressedSizeOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionRDCSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.RDCSizeOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionRDCNumberofFilesReceivedTotal,
prometheus.CounterValue,
connection.RDCNumberofFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.SizeOfFilesReceivedTotal,
connection.Name,
)
}
return nil
}
// Perflib: "DFS Replicated Folder"
type PerflibDFSRFolder struct {
Name string
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
ConflictBytesCleanedupTotal float64 `perflib:"Conflict Bytes Cleaned Up"`
ConflictBytesGeneratedTotal float64 `perflib:"Conflict Bytes Generated"`
ConflictFilesCleanedUpTotal float64 `perflib:"Conflict Files Cleaned Up"`
ConflictFilesGeneratedTotal float64 `perflib:"Conflict Files Generated"`
ConflictFolderCleanupsCompletedTotal float64 `perflib:"Conflict Folder Cleanups Completed"`
ConflictSpaceInUse float64 `perflib:"Conflict Space In Use"`
DeletedSpaceInUse float64 `perflib:"Deleted Space In Use"`
DeletedBytesCleanedUpTotal float64 `perflib:"Deleted Bytes Cleaned Up"`
DeletedBytesGeneratedTotal float64 `perflib:"Deleted Bytes Generated"`
DeletedFilesCleanedUpTotal float64 `perflib:"Deleted Files Cleaned Up"`
DeletedFilesGeneratedTotal float64 `perflib:"Deleted Files Generated"`
FileInstallsRetriedTotal float64 `perflib:"File Installs Retried"`
FileInstallsSucceededTotal float64 `perflib:"File Installs Succeeded"`
FilesReceivedTotal float64 `perflib:"Total Files Received"`
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
RDCNumberofFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
StagingSpaceInUse float64 `perflib:"Staging Space In Use"`
StagingBytesCleanedUpTotal float64 `perflib:"Staging Bytes Cleaned Up"`
StagingBytesGeneratedTotal float64 `perflib:"Staging Bytes Generated"`
StagingFilesCleanedUpTotal float64 `perflib:"Staging Files Cleaned Up"`
StagingFilesGeneratedTotal float64 `perflib:"Staging Files Generated"`
UpdatesDroppedTotal float64 `perflib:"Updates Dropped"`
}
func (c *DFSRCollector) collectFolder(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRFolder
if err := unmarshalObject(ctx.perfObjects["DFS Replicated Folders"], &dst, c.logger); err != nil {
return err
}
for _, folder := range dst {
ch <- prometheus.MustNewConstMetric(
c.FolderBandwidthSavingsUsingDFSReplicationTotal,
prometheus.CounterValue,
folder.BandwidthSavingsUsingDFSReplicationTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.CompressedSizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictBytesCleanedupTotal,
prometheus.CounterValue,
folder.ConflictBytesCleanedupTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictBytesGeneratedTotal,
prometheus.CounterValue,
folder.ConflictBytesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictFilesCleanedUpTotal,
prometheus.CounterValue,
folder.ConflictFilesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictFilesGeneratedTotal,
prometheus.CounterValue,
folder.ConflictFilesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictFolderCleanupsCompletedTotal,
prometheus.CounterValue,
folder.ConflictFolderCleanupsCompletedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictSpaceInUse,
prometheus.GaugeValue,
folder.ConflictSpaceInUse,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedSpaceInUse,
prometheus.GaugeValue,
folder.DeletedSpaceInUse,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedBytesCleanedUpTotal,
prometheus.CounterValue,
folder.DeletedBytesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedBytesGeneratedTotal,
prometheus.CounterValue,
folder.DeletedBytesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedFilesCleanedUpTotal,
prometheus.CounterValue,
folder.DeletedFilesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedFilesGeneratedTotal,
prometheus.CounterValue,
folder.DeletedFilesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderFileInstallsRetriedTotal,
prometheus.CounterValue,
folder.FileInstallsRetriedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderFileInstallsSucceededTotal,
prometheus.CounterValue,
folder.FileInstallsSucceededTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderFilesReceivedTotal,
prometheus.CounterValue,
folder.FilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderRDCBytesReceivedTotal,
prometheus.CounterValue,
folder.RDCBytesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderRDCCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.RDCCompressedSizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderRDCNumberofFilesReceivedTotal,
prometheus.CounterValue,
folder.RDCNumberofFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderRDCSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.RDCSizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.SizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingSpaceInUse,
prometheus.GaugeValue,
folder.StagingSpaceInUse,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingBytesCleanedUpTotal,
prometheus.CounterValue,
folder.StagingBytesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingBytesGeneratedTotal,
prometheus.CounterValue,
folder.StagingBytesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingFilesCleanedUpTotal,
prometheus.CounterValue,
folder.StagingFilesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingFilesGeneratedTotal,
prometheus.CounterValue,
folder.StagingFilesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderUpdatesDroppedTotal,
prometheus.CounterValue,
folder.UpdatesDroppedTotal,
folder.Name,
)
}
return nil
}
// Perflib: "DFS Replication Service Volumes"
type PerflibDFSRVolume struct {
Name string
DatabaseCommitsTotal float64 `perflib:"Database Commits"`
DatabaseLookupsTotal float64 `perflib:"Database Lookups"`
USNJournalRecordsReadTotal float64 `perflib:"USN Journal Records Read"`
USNJournalRecordsAcceptedTotal float64 `perflib:"USN Journal Records Accepted"`
USNJournalUnreadPercentage float64 `perflib:"USN Journal Records Unread Percentage"`
}
func (c *DFSRCollector) collectVolume(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRVolume
if err := unmarshalObject(ctx.perfObjects["DFS Replication Service Volumes"], &dst, c.logger); err != nil {
return err
}
for _, volume := range dst {
ch <- prometheus.MustNewConstMetric(
c.VolumeDatabaseLookupsTotal,
prometheus.CounterValue,
volume.DatabaseLookupsTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VolumeDatabaseCommitsTotal,
prometheus.CounterValue,
volume.DatabaseCommitsTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VolumeUSNJournalRecordsAcceptedTotal,
prometheus.CounterValue,
volume.USNJournalRecordsAcceptedTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VolumeUSNJournalRecordsReadTotal,
prometheus.CounterValue,
volume.USNJournalRecordsReadTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VolumeUSNJournalUnreadPercentage,
prometheus.GaugeValue,
volume.USNJournalUnreadPercentage,
volume.Name,
)
}
return nil
}

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkDFSRCollector(b *testing.B) {
benchmarkCollector(b, "dfsr", newDFSRCollector)
}

@@ -1,389 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"errors"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
// A DhcpCollector is a Prometheus collector for perflib DHCP metrics
type DhcpCollector struct {
logger log.Logger
PacketsReceivedTotal *prometheus.Desc
DuplicatesDroppedTotal *prometheus.Desc
PacketsExpiredTotal *prometheus.Desc
ActiveQueueLength *prometheus.Desc
ConflictCheckQueueLength *prometheus.Desc
DiscoversTotal *prometheus.Desc
OffersTotal *prometheus.Desc
RequestsTotal *prometheus.Desc
InformsTotal *prometheus.Desc
AcksTotal *prometheus.Desc
NacksTotal *prometheus.Desc
DeclinesTotal *prometheus.Desc
ReleasesTotal *prometheus.Desc
OfferQueueLength *prometheus.Desc
DeniedDueToMatch *prometheus.Desc
DeniedDueToNonMatch *prometheus.Desc
FailoverBndupdSentTotal *prometheus.Desc
FailoverBndupdReceivedTotal *prometheus.Desc
FailoverBndackSentTotal *prometheus.Desc
FailoverBndackReceivedTotal *prometheus.Desc
FailoverBndupdPendingOutboundQueue *prometheus.Desc
FailoverTransitionsCommunicationinterruptedState *prometheus.Desc
FailoverTransitionsPartnerdownState *prometheus.Desc
FailoverTransitionsRecoverState *prometheus.Desc
FailoverBndupdDropped *prometheus.Desc
}
func newDhcpCollector(logger log.Logger) (Collector, error) {
const subsystem = "dhcp"
return &DhcpCollector{
logger: log.With(logger, "collector", subsystem),
PacketsReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_received_total"),
"Total number of packets received by the DHCP server (PacketsReceivedTotal)",
nil,
nil,
),
DuplicatesDroppedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "duplicates_dropped_total"),
"Total number of duplicate packets received by the DHCP server (DuplicatesDroppedTotal)",
nil,
nil,
),
PacketsExpiredTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_expired_total"),
"Total number of packets expired in the DHCP server message queue (PacketsExpiredTotal)",
nil,
nil,
),
ActiveQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "active_queue_length"),
"Number of packets in the processing queue of the DHCP server (ActiveQueueLength)",
nil,
nil,
),
ConflictCheckQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "conflict_check_queue_length"),
"Number of packets in the DHCP server queue waiting on conflict detection (ping). (ConflictCheckQueueLength)",
nil,
nil,
),
DiscoversTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "discovers_total"),
"Total DHCP Discovers received by the DHCP server (DiscoversTotal)",
nil,
nil,
),
OffersTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "offers_total"),
"Total DHCP Offers sent by the DHCP server (OffersTotal)",
nil,
nil,
),
RequestsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "requests_total"),
"Total DHCP Requests received by the DHCP server (RequestsTotal)",
nil,
nil,
),
InformsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "informs_total"),
"Total DHCP Informs received by the DHCP server (InformsTotal)",
nil,
nil,
),
AcksTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "acks_total"),
"Total DHCP Acks sent by the DHCP server (AcksTotal)",
nil,
nil,
),
NacksTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "nacks_total"),
"Total DHCP Nacks sent by the DHCP server (NacksTotal)",
nil,
nil,
),
DeclinesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "declines_total"),
"Total DHCP Declines received by the DHCP server (DeclinesTotal)",
nil,
nil,
),
ReleasesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "releases_total"),
"Total DHCP Releases received by the DHCP server (ReleasesTotal)",
nil,
nil,
),
OfferQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "offer_queue_length"),
"Number of packets in the offer queue of the DHCP server (OfferQueueLength)",
nil,
nil,
),
DeniedDueToMatch: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "denied_due_to_match_total"),
"Total number of DHCP requests denied, based on matches from the Deny list (DeniedDueToMatch)",
nil,
nil,
),
DeniedDueToNonMatch: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "denied_due_to_nonmatch_total"),
"Total number of DHCP requests denied, based on non-matches from the Allow list (DeniedDueToNonMatch)",
nil,
nil,
),
FailoverBndupdSentTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_sent_total"),
"Number of DHCP failover Binding Update messages sent (FailoverBndupdSentTotal)",
nil,
nil,
),
FailoverBndupdReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_received_total"),
"Number of DHCP failover Binding Update messages received (FailoverBndupdReceivedTotal)",
nil,
nil,
),
FailoverBndackSentTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndack_sent_total"),
"Number of DHCP failover Binding Ack messages sent (FailoverBndackSentTotal)",
nil,
nil,
),
FailoverBndackReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndack_received_total"),
"Number of DHCP failover Binding Ack messages received (FailoverBndackReceivedTotal)",
nil,
nil,
),
FailoverBndupdPendingOutboundQueue: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_pending_in_outbound_queue"),
"Number of pending outbound DHCP failover Binding Update messages (FailoverBndupdPendingOutboundQueue)",
nil,
nil,
),
FailoverTransitionsCommunicationinterruptedState: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_transitions_communicationinterrupted_state_total"),
"Total number of transitions into COMMUNICATION INTERRUPTED state (FailoverTransitionsCommunicationinterruptedState)",
nil,
nil,
),
FailoverTransitionsPartnerdownState: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_transitions_partnerdown_state_total"),
"Total number of transitions into PARTNER DOWN state (FailoverTransitionsPartnerdownState)",
nil,
nil,
),
FailoverTransitionsRecoverState: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_transitions_recover_total"),
"Total number of transitions into RECOVER state (FailoverTransitionsRecoverState)",
nil,
nil,
),
FailoverBndupdDropped: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_dropped_total"),
"Total number of DHCP faileover Binding Updates dropped (FailoverBndupdDropped)",
nil,
nil,
),
}, nil
}
// dhcpPerf represents perflib metrics from the DHCP Server class.
// While the names of several perflib metrics suggest a rate is being returned
// (e.g. Packets Received/sec), perflib instead returns a running counter, hence
// the "Total" suffix on some of the variable names.
type dhcpPerf struct {
PacketsReceivedTotal float64 `perflib:"Packets Received/sec"`
DuplicatesDroppedTotal float64 `perflib:"Duplicates Dropped/sec"`
PacketsExpiredTotal float64 `perflib:"Packets Expired/sec"`
ActiveQueueLength float64 `perflib:"Active Queue Length"`
ConflictCheckQueueLength float64 `perflib:"Conflict Check Queue Length"`
DiscoversTotal float64 `perflib:"Discovers/sec"`
OffersTotal float64 `perflib:"Offers/sec"`
RequestsTotal float64 `perflib:"Requests/sec"`
InformsTotal float64 `perflib:"Informs/sec"`
AcksTotal float64 `perflib:"Acks/sec"`
NacksTotal float64 `perflib:"Nacks/sec"`
DeclinesTotal float64 `perflib:"Declines/sec"`
ReleasesTotal float64 `perflib:"Releases/sec"`
DeniedDueToMatch float64 `perflib:"Denied due to match."`
DeniedDueToNonMatch float64 `perflib:"Denied due to nonmatch."`
OfferQueueLength float64 `perflib:"Offer Queue Length"`
FailoverBndupdSentTotal float64 `perflib:"Failover: BndUpd sent/sec."`
FailoverBndupdReceivedTotal float64 `perflib:"Failover: BndUpd received/sec."`
FailoverBndackSentTotal float64 `perflib:"Failover: BndAck sent/sec."`
FailoverBndackReceivedTotal float64 `perflib:"Failover: BndAck received/sec."`
FailoverBndupdPendingOutboundQueue float64 `perflib:"Failover: BndUpd pending in outbound queue."`
FailoverTransitionsCommunicationinterruptedState float64 `perflib:"Failover: Transitions to COMMUNICATION-INTERRUPTED state."`
FailoverTransitionsPartnerdownState float64 `perflib:"Failover: Transitions to PARTNER-DOWN state."`
FailoverTransitionsRecoverState float64 `perflib:"Failover: Transitions to RECOVER state."`
FailoverBndupdDropped float64 `perflib:"Failover: BndUpd Dropped."`
}
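// Because perflib reports running totals for these "/sec" counters, they are
// exported below as Prometheus counters; per-second rates are derived at query
// time, e.g. rate(windows_dhcp_packets_received_total[5m]) (illustrative PromQL,
// assuming the default "windows" namespace).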
func (c *DhcpCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var perflib []dhcpPerf
if err := unmarshalObject(ctx.perfObjects["DHCP Server"], &perflib, c.logger); err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.PacketsReceivedTotal,
prometheus.CounterValue,
perflib[0].PacketsReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DuplicatesDroppedTotal,
prometheus.CounterValue,
perflib[0].DuplicatesDroppedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsExpiredTotal,
prometheus.CounterValue,
perflib[0].PacketsExpiredTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ActiveQueueLength,
prometheus.GaugeValue,
perflib[0].ActiveQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.ConflictCheckQueueLength,
prometheus.GaugeValue,
perflib[0].ConflictCheckQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.DiscoversTotal,
prometheus.CounterValue,
perflib[0].DiscoversTotal,
)
ch <- prometheus.MustNewConstMetric(
c.OffersTotal,
prometheus.CounterValue,
perflib[0].OffersTotal,
)
ch <- prometheus.MustNewConstMetric(
c.RequestsTotal,
prometheus.CounterValue,
perflib[0].RequestsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.InformsTotal,
prometheus.CounterValue,
perflib[0].InformsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AcksTotal,
prometheus.CounterValue,
perflib[0].AcksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.NacksTotal,
prometheus.CounterValue,
perflib[0].NacksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DeclinesTotal,
prometheus.CounterValue,
perflib[0].DeclinesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ReleasesTotal,
prometheus.CounterValue,
perflib[0].ReleasesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.OfferQueueLength,
prometheus.GaugeValue,
perflib[0].OfferQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.DeniedDueToMatch,
prometheus.CounterValue,
perflib[0].DeniedDueToMatch,
)
ch <- prometheus.MustNewConstMetric(
c.DeniedDueToNonMatch,
prometheus.CounterValue,
perflib[0].DeniedDueToNonMatch,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdSentTotal,
prometheus.CounterValue,
perflib[0].FailoverBndupdSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdReceivedTotal,
prometheus.CounterValue,
perflib[0].FailoverBndupdReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndackSentTotal,
prometheus.CounterValue,
perflib[0].FailoverBndackSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndackReceivedTotal,
prometheus.CounterValue,
perflib[0].FailoverBndackReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdPendingOutboundQueue,
prometheus.GaugeValue,
perflib[0].FailoverBndupdPendingOutboundQueue,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverTransitionsCommunicationinterruptedState,
prometheus.CounterValue,
perflib[0].FailoverTransitionsCommunicationinterruptedState,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverTransitionsPartnerdownState,
prometheus.CounterValue,
perflib[0].FailoverTransitionsPartnerdownState,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverTransitionsRecoverState,
prometheus.CounterValue,
perflib[0].FailoverTransitionsRecoverState,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdDropped,
prometheus.CounterValue,
perflib[0].FailoverBndupdDropped,
)
return nil
}

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkDHCPCollector(b *testing.B) {
benchmarkCollector(b, "dhcp", newDhcpCollector)
}

@@ -1,209 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"errors"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
const (
win32DiskQuery = "SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive"
)
// A DiskDriveInfoCollector is a Prometheus collector for a few WMI metrics in Win32_DiskDrive
type DiskDriveInfoCollector struct {
logger log.Logger
DiskInfo *prometheus.Desc
Status *prometheus.Desc
Size *prometheus.Desc
Partitions *prometheus.Desc
Availability *prometheus.Desc
}
func newDiskDriveInfoCollector(logger log.Logger) (Collector, error) {
const subsystem = "diskdrive"
return &DiskDriveInfoCollector{
logger: log.With(logger, "collector", subsystem),
DiskInfo: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "info"),
"General drive information",
[]string{
"device_id",
"model",
"caption",
"name",
},
nil,
),
Status: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "status"),
"Status of the drive",
[]string{
"name", "status"},
nil,
),
Size: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "size"),
"Size of the disk drive. It is calculated by multiplying the total number of cylinders, tracks in each cylinder, sectors in each track, and bytes in each sector.",
[]string{"name"},
nil,
),
Partitions: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "partitions"),
"Number of partitions",
[]string{"name"},
nil,
),
Availability: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "availability"),
"Availability Status",
[]string{
"name", "availability"},
nil,
),
}, nil
}
type Win32_DiskDrive struct {
DeviceID string
Model string
Size uint64
Name string
Caption string
Partitions uint32
Status string
Availability uint16
}
var (
allDiskStatus = []string{
"OK",
"Error",
"Degraded",
"Unknown",
"Pred fail",
"Starting",
"Stopping",
"Service",
"Stressed",
"Nonrecover",
"No Contact",
"Lost Comm",
}
availMap = map[int]string{
1: "Other",
2: "Unknown",
3: "Running / Full Power",
4: "Warning",
5: "In Test",
6: "Not Applicable",
7: "Power Off",
8: "Off line",
9: "Off Duty",
10: "Degraded",
11: "Not Installed",
12: "Install Error",
13: "Power Save - Unknown",
14: "Power Save - Low Power Mode",
15: "Power Save - Standby",
16: "Power Cycle",
17: "Power Save - Warning",
18: "Paused",
19: "Not Ready",
20: "Not Configured",
21: "Quiesced",
}
)
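// The loops in collect() below expose Status and Availability as "state set"
// metrics: one series per possible state, valued 1 for the current state and 0
// otherwise. Illustrative output (metric names assume the "windows" namespace):
//
//   windows_diskdrive_status{name="PHYSICALDRIVE0",status="OK"} 1
//   windows_diskdrive_status{name="PHYSICALDRIVE0",status="Error"} 0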
// Collect sends the metric values for each metric to the provided prometheus Metric channel.
func (c *DiskDriveInfoCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting disk_drive_info metrics", "desc", desc, "err", err)
return err
}
return nil
}
func (c *DiskDriveInfoCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_DiskDrive
if err := wmi.Query(win32DiskQuery, &dst); err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
for _, disk := range dst {
ch <- prometheus.MustNewConstMetric(
c.DiskInfo,
prometheus.GaugeValue,
1.0,
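// strings.Trim takes a cutset, not a prefix: it strips any leading/trailing
// '\' and '.' characters, e.g. `\\.\PHYSICALDRIVE0` -> "PHYSICALDRIVE0".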
strings.Trim(disk.DeviceID, "\\.\\"),
strings.TrimRight(disk.Model, " "),
strings.TrimRight(disk.Caption, " "),
strings.Trim(disk.Name, "\\.\\"),
)
for _, status := range allDiskStatus {
isCurrentState := 0.0
if status == disk.Status {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.Status,
prometheus.GaugeValue,
isCurrentState,
strings.Trim(disk.Name, "\\.\\"),
status,
)
}
ch <- prometheus.MustNewConstMetric(
c.Size,
prometheus.GaugeValue,
float64(disk.Size),
strings.Trim(disk.Name, "\\.\\"),
)
ch <- prometheus.MustNewConstMetric(
c.Partitions,
prometheus.GaugeValue,
float64(disk.Partitions),
strings.Trim(disk.Name, "\\.\\"),
)
for availNum, val := range availMap {
isCurrentState := 0.0
if availNum == int(disk.Availability) {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.Availability,
prometheus.GaugeValue,
isCurrentState,
strings.Trim(disk.Name, "\\.\\"),
val,
)
}
}
return nil, nil
}

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkDiskDriveCollector(b *testing.B) {
benchmarkCollector(b, "disk_drive", newDiskDriveInfoCollector)
}

@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package collector
@@ -6,16 +5,17 @@ package collector
import (
"errors"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("dns", NewDNSCollector)
}
// A DNSCollector is a Prometheus collector for WMI Win32_PerfRawData_DNS_DNS metrics
type DNSCollector struct {
logger log.Logger
ZoneTransferRequestsReceived *prometheus.Desc
ZoneTransferRequestsSent *prometheus.Desc
ZoneTransferResponsesReceived *prometheus.Desc
@@ -40,12 +40,10 @@ type DNSCollector struct {
UnmatchedResponsesReceived *prometheus.Desc
}
// newDNSCollector ...
func newDNSCollector(logger log.Logger) (Collector, error) {
// NewDNSCollector ...
func NewDNSCollector() (Collector, error) {
const subsystem = "dns"
return &DNSCollector{
logger: log.With(logger, "collector", subsystem),
ZoneTransferRequestsReceived: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "zone_transfer_requests_received_total"),
"Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
@@ -83,8 +81,8 @@ func newDNSCollector(logger log.Logger) (Collector, error) {
nil,
),
MemoryUsedBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "memory_used_bytes"),
"Current memory used by DNS server",
prometheus.BuildFQName(Namespace, subsystem, "memory_used_bytes_total"),
"Total memory used by DNS server",
[]string{"area"},
nil,
),
@@ -138,7 +136,7 @@ func newDNSCollector(logger log.Logger) (Collector, error) {
),
Responses: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "responses_total"),
"Number of responses sent by DNS server",
"Number of reponses sent by DNS server",
[]string{"protocol"},
nil,
),
@@ -185,7 +183,7 @@ func newDNSCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting dns metrics", "desc", desc, "err", err)
log.Error("failed collecting dns metrics:", desc, err)
return err
}
return nil
@@ -239,7 +237,7 @@ type Win32_PerfRawData_DNS_DNS struct {
func (c *DNSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_DNS_DNS
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkDNSCollector(b *testing.B) {
benchmarkCollector(b, "dns", newDNSCollector)
}

@@ -1,638 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"fmt"
"os"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
const (
FlagExchangeListAllCollectors = "collectors.exchange.list"
FlagExchangeCollectorsEnabled = "collectors.exchange.enabled"
)
type exchangeCollector struct {
logger log.Logger
LDAPReadTime *prometheus.Desc
LDAPSearchTime *prometheus.Desc
LDAPWriteTime *prometheus.Desc
LDAPTimeoutErrorsPerSec *prometheus.Desc
LongRunningLDAPOperationsPerMin *prometheus.Desc
ExternalActiveRemoteDeliveryQueueLength *prometheus.Desc
InternalActiveRemoteDeliveryQueueLength *prometheus.Desc
ActiveMailboxDeliveryQueueLength *prometheus.Desc
RetryMailboxDeliveryQueueLength *prometheus.Desc
UnreachableQueueLength *prometheus.Desc
ExternalLargestDeliveryQueueLength *prometheus.Desc
InternalLargestDeliveryQueueLength *prometheus.Desc
PoisonQueueLength *prometheus.Desc
MailboxServerLocatorAverageLatency *prometheus.Desc
AverageAuthenticationLatency *prometheus.Desc
AverageCASProcessingLatency *prometheus.Desc
MailboxServerProxyFailureRate *prometheus.Desc
OutstandingProxyRequests *prometheus.Desc
ProxyRequestsPerSec *prometheus.Desc
ActiveSyncRequestsPerSec *prometheus.Desc
PingCommandsPending *prometheus.Desc
SyncCommandsPerSec *prometheus.Desc
AvailabilityRequestsSec *prometheus.Desc
CurrentUniqueUsers *prometheus.Desc
OWARequestsPerSec *prometheus.Desc
AutodiscoverRequestsPerSec *prometheus.Desc
ActiveTasks *prometheus.Desc
CompletedTasks *prometheus.Desc
QueuedTasks *prometheus.Desc
YieldedTasks *prometheus.Desc
IsActive *prometheus.Desc
RPCAveragedLatency *prometheus.Desc
RPCRequests *prometheus.Desc
ActiveUserCount *prometheus.Desc
ConnectionCount *prometheus.Desc
RPCOperationsPerSec *prometheus.Desc
UserCount *prometheus.Desc
enabledCollectors []string
}
var (
// All available collector functions
exchangeAllCollectorNames = []string{
"ADAccessProcesses",
"TransportQueues",
"HttpProxy",
"ActiveSync",
"AvailabilityService",
"OutlookWebAccess",
"Autodiscover",
"WorkloadManagement",
"RpcClientAccess",
}
argExchangeListAllCollectors *bool
argExchangeCollectorsEnabled *string
)
// newExchangeCollectorFlags ...
func newExchangeCollectorFlags(app *kingpin.Application) {
argExchangeListAllCollectors = app.Flag(
FlagExchangeListAllCollectors,
"List the collectors along with their perflib object name/ids",
).Bool()
argExchangeCollectorsEnabled = app.Flag(
FlagExchangeCollectorsEnabled,
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
).Default("").String()
}
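// Illustrative invocations (binary name assumed; flag names as defined above):
//
//   windows_exporter.exe --collectors.exchange.list
//   windows_exporter.exe --collectors.exchange.enabled="TransportQueues,HttpProxy"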
// newExchangeCollector returns a new Collector
func newExchangeCollector(logger log.Logger) (Collector, error) {
const subsystem = "exchange"
// desc creates a new prometheus description
desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
return prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "exchange", metricName),
description,
labels,
nil,
)
}
c := exchangeCollector{
logger: log.With(logger, "collector", subsystem),
RPCAveragedLatency: desc("rpc_avg_latency_sec", "The latency (sec), averaged for the past 1024 packets"),
RPCRequests: desc("rpc_requests", "Number of client requests currently being processed by the RPC Client Access service"),
ActiveUserCount: desc("rpc_active_user_count", "Number of unique users that have shown some kind of activity in the last 2 minutes"),
ConnectionCount: desc("rpc_connection_count", "Total number of client connections maintained"),
RPCOperationsPerSec: desc("rpc_operations_total", "The rate at which RPC operations occur"),
UserCount: desc("rpc_user_count", "Number of users"),
LDAPReadTime: desc("ldap_read_time_sec", "Time (sec) to send an LDAP read request and receive a response", "name"),
LDAPSearchTime: desc("ldap_search_time_sec", "Time (sec) to send an LDAP search request and receive a response", "name"),
LDAPWriteTime: desc("ldap_write_time_sec", "Time (sec) to send an LDAP Add/Modify/Delete request and receive a response", "name"),
LDAPTimeoutErrorsPerSec: desc("ldap_timeout_errors_total", "Total number of LDAP timeout errors", "name"),
LongRunningLDAPOperationsPerMin: desc("ldap_long_running_ops_per_sec", "Long Running LDAP operations per second", "name"),
ExternalActiveRemoteDeliveryQueueLength: desc("transport_queues_external_active_remote_delivery", "External Active Remote Delivery Queue length", "name"),
InternalActiveRemoteDeliveryQueueLength: desc("transport_queues_internal_active_remote_delivery", "Internal Active Remote Delivery Queue length", "name"),
ActiveMailboxDeliveryQueueLength: desc("transport_queues_active_mailbox_delivery", "Active Mailbox Delivery Queue length", "name"),
RetryMailboxDeliveryQueueLength: desc("transport_queues_retry_mailbox_delivery", "Retry Mailbox Delivery Queue length", "name"),
UnreachableQueueLength: desc("transport_queues_unreachable", "Unreachable Queue length", "name"),
ExternalLargestDeliveryQueueLength: desc("transport_queues_external_largest_delivery", "External Largest Delivery Queue length", "name"),
InternalLargestDeliveryQueueLength: desc("transport_queues_internal_largest_delivery", "Internal Largest Delivery Queue length", "name"),
PoisonQueueLength: desc("transport_queues_poison", "Poison Queue length", "name"),
MailboxServerLocatorAverageLatency: desc("http_proxy_mailbox_server_locator_avg_latency_sec", "Average latency (sec) of MailboxServerLocator web service calls", "name"),
AverageAuthenticationLatency: desc("http_proxy_avg_auth_latency", "Average time spent authenticating CAS requests over the last 200 samples", "name"),
OutstandingProxyRequests: desc("http_proxy_outstanding_proxy_requests", "Number of concurrent outstanding proxy requests", "name"),
ProxyRequestsPerSec: desc("http_proxy_requests_total", "Number of proxy requests processed each second", "name"),
AvailabilityRequestsSec: desc("avail_service_requests_per_sec", "Number of requests serviced per second"),
CurrentUniqueUsers: desc("owa_current_unique_users", "Number of unique users currently logged on to Outlook Web App"),
OWARequestsPerSec: desc("owa_requests_total", "Number of requests handled by Outlook Web App per second"),
AutodiscoverRequestsPerSec: desc("autodiscover_requests_total", "Number of autodiscover service requests processed each second"),
ActiveTasks: desc("workload_active_tasks", "Number of active tasks currently running in the background for workload management", "name"),
CompletedTasks: desc("workload_completed_tasks", "Number of workload management tasks that have been completed", "name"),
QueuedTasks: desc("workload_queued_tasks", "Number of workload management tasks that are currently queued up waiting to be processed", "name"),
YieldedTasks: desc("workload_yielded_tasks", "The total number of tasks that have been yielded by a workload", "name"),
IsActive: desc("workload_is_active", "Active indicates whether the workload is in an active (1) or paused (0) state", "name"),
ActiveSyncRequestsPerSec: desc("activesync_requests_total", "Number of HTTP requests received from the client via ASP.NET per second. Indicates current user load"),
AverageCASProcessingLatency: desc("http_proxy_avg_cas_proccessing_latency_sec", "Average latency (sec) of CAS processing time over the last 200 requests", "name"),
MailboxServerProxyFailureRate: desc("http_proxy_mailbox_proxy_failure_rate", "% of failures between this CAS and MBX servers over the last 200 samples", "name"),
PingCommandsPending: desc("activesync_ping_cmds_pending", "Number of ping commands currently pending in the queue"),
SyncCommandsPerSec: desc("activesync_sync_cmds_total", "Number of sync commands processed per second. Clients use this command to synchronize items within a folder"),
enabledCollectors: make([]string, 0, len(exchangeAllCollectorNames)),
}
collectorDesc := map[string]string{
"ADAccessProcesses": "[19108] MSExchange ADAccess Processes",
"TransportQueues": "[20524] MSExchangeTransport Queues",
"HttpProxy": "[36934] MSExchange HttpProxy",
"ActiveSync": "[25138] MSExchange ActiveSync",
"AvailabilityService": "[24914] MSExchange Availability Service",
"OutlookWebAccess": "[24618] MSExchange OWA",
"Autodiscover": "[29240] MSExchange Autodiscover",
"WorkloadManagement": "[19430] MSExchange WorkloadManagement Workloads",
"RpcClientAccess": "[29336] MSExchange RpcClientAccess",
}
if *argExchangeListAllCollectors {
fmt.Printf("%-32s %-32s\n", "Collector Name", "[PerfID] Perflib Object")
for _, cname := range exchangeAllCollectorNames {
fmt.Printf("%-32s %-32s\n", cname, collectorDesc[cname])
}
os.Exit(0)
}
if *argExchangeCollectorsEnabled == "" {
for _, collectorName := range exchangeAllCollectorNames {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
}
} else {
for _, collectorName := range strings.Split(*argExchangeCollectorsEnabled, ",") {
if find(exchangeAllCollectorNames, collectorName) {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
} else {
return nil, fmt.Errorf("unknown exchange collector: %s", collectorName)
}
}
}
return &c, nil
}
// Collect collects exchange metrics and sends them to prometheus
func (c *exchangeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
collectorFuncs := map[string]func(ctx *ScrapeContext, ch chan<- prometheus.Metric) error{
"ADAccessProcesses": c.collectADAccessProcesses,
"TransportQueues": c.collectTransportQueues,
"HttpProxy": c.collectHTTPProxy,
"ActiveSync": c.collectActiveSync,
"AvailabilityService": c.collectAvailabilityService,
"OutlookWebAccess": c.collectOWA,
"Autodiscover": c.collectAutoDiscover,
"WorkloadManagement": c.collectWorkloadManagementWorkloads,
"RpcClientAccess": c.collectRPC,
}
for _, collectorName := range c.enabledCollectors {
if err := collectorFuncs[collectorName](ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "Error in "+collectorName, "err", err)
return err
}
}
return nil
}
// Perflib: [19108] MSExchange ADAccess Processes
type perflibADAccessProcesses struct {
Name string
LDAPReadTime float64 `perflib:"LDAP Read Time"`
LDAPSearchTime float64 `perflib:"LDAP Search Time"`
LDAPWriteTime float64 `perflib:"LDAP Write Time"`
LDAPTimeoutErrorsPerSec float64 `perflib:"LDAP Timeout Errors/sec"`
LongRunningLDAPOperationsPerMin float64 `perflib:"Long Running LDAP Operations/min"`
}
func (c *exchangeCollector) collectADAccessProcesses(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibADAccessProcesses
if err := unmarshalObject(ctx.perfObjects["MSExchange ADAccess Processes"], &data, c.logger); err != nil {
return err
}
labelUseCount := make(map[string]int)
for _, proc := range data {
labelName := c.toLabelName(proc.Name)
if strings.HasSuffix(labelName, "_total") {
continue
}
// Since we're not including the PID suffix from the instance names in the label
// names, we get an occasional duplicate. This seems to affect only about 4
// instances of this object.
labelUseCount[labelName]++
if labelUseCount[labelName] > 1 {
labelName = fmt.Sprintf("%s_%d", labelName, labelUseCount[labelName])
}
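// e.g. two instances that both collapse to "msexchangemailboxreplication" once
// the PID suffix is dropped would be exported as labels
// "msexchangemailboxreplication" and "msexchangemailboxreplication_2"
// (illustrative names).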
ch <- prometheus.MustNewConstMetric(
c.LDAPReadTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPReadTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LDAPSearchTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPSearchTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LDAPWriteTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPWriteTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LDAPTimeoutErrorsPerSec,
prometheus.CounterValue,
proc.LDAPTimeoutErrorsPerSec,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LongRunningLDAPOperationsPerMin,
prometheus.CounterValue,
proc.LongRunningLDAPOperationsPerMin*60,
labelName,
)
}
return nil
}
// Perflib: [24914] MSExchange Availability Service
type perflibAvailabilityService struct {
RequestsSec float64 `perflib:"Availability Requests (sec)"`
}
func (c *exchangeCollector) collectAvailabilityService(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibAvailabilityService
if err := unmarshalObject(ctx.perfObjects["MSExchange Availability Service"], &data, c.logger); err != nil {
return err
}
for _, availservice := range data {
ch <- prometheus.MustNewConstMetric(
c.AvailabilityRequestsSec,
prometheus.CounterValue,
availservice.RequestsSec,
)
}
return nil
}
// Perflib: [36934] MSExchange HttpProxy
type perflibHTTPProxy struct {
Name string
MailboxServerLocatorAverageLatency float64 `perflib:"MailboxServerLocator Average Latency (Moving Average)"`
AverageAuthenticationLatency float64 `perflib:"Average Authentication Latency"`
AverageCASProcessingLatency float64 `perflib:"Average ClientAccess Server Processing Latency"`
MailboxServerProxyFailureRate float64 `perflib:"Mailbox Server Proxy Failure Rate"`
OutstandingProxyRequests float64 `perflib:"Outstanding Proxy Requests"`
ProxyRequestsPerSec float64 `perflib:"Proxy Requests/Sec"`
}
func (c *exchangeCollector) collectHTTPProxy(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibHTTPProxy
if err := unmarshalObject(ctx.perfObjects["MSExchange HttpProxy"], &data, c.logger); err != nil {
return err
}
for _, instance := range data {
labelName := c.toLabelName(instance.Name)
ch <- prometheus.MustNewConstMetric(
c.MailboxServerLocatorAverageLatency,
prometheus.GaugeValue,
c.msToSec(instance.MailboxServerLocatorAverageLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.AverageAuthenticationLatency,
prometheus.GaugeValue,
instance.AverageAuthenticationLatency,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.AverageCASProcessingLatency,
prometheus.GaugeValue,
c.msToSec(instance.AverageCASProcessingLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.MailboxServerProxyFailureRate,
prometheus.GaugeValue,
instance.MailboxServerProxyFailureRate,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.OutstandingProxyRequests,
prometheus.GaugeValue,
instance.OutstandingProxyRequests,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ProxyRequestsPerSec,
prometheus.CounterValue,
instance.ProxyRequestsPerSec,
labelName,
)
}
return nil
}
// Perflib: [24618] MSExchange OWA
type perflibOWA struct {
CurrentUniqueUsers float64 `perflib:"Current Unique Users"`
RequestsPerSec float64 `perflib:"Requests/sec"`
}
func (c *exchangeCollector) collectOWA(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibOWA
if err := unmarshalObject(ctx.perfObjects["MSExchange OWA"], &data, c.logger); err != nil {
return err
}
for _, owa := range data {
ch <- prometheus.MustNewConstMetric(
c.CurrentUniqueUsers,
prometheus.GaugeValue,
owa.CurrentUniqueUsers,
)
ch <- prometheus.MustNewConstMetric(
c.OWARequestsPerSec,
prometheus.CounterValue,
owa.RequestsPerSec,
)
}
return nil
}
// Perflib: [25138] MSExchange ActiveSync
type perflibActiveSync struct {
RequestsPerSec float64 `perflib:"Requests/sec"`
PingCommandsPending float64 `perflib:"Ping Commands Pending"`
SyncCommandsPerSec float64 `perflib:"Sync Commands/sec"`
}
func (c *exchangeCollector) collectActiveSync(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibActiveSync
if err := unmarshalObject(ctx.perfObjects["MSExchange ActiveSync"], &data, c.logger); err != nil {
return err
}
for _, instance := range data {
ch <- prometheus.MustNewConstMetric(
c.ActiveSyncRequestsPerSec,
prometheus.CounterValue,
instance.RequestsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.PingCommandsPending,
prometheus.GaugeValue,
instance.PingCommandsPending,
)
ch <- prometheus.MustNewConstMetric(
c.SyncCommandsPerSec,
prometheus.CounterValue,
instance.SyncCommandsPerSec,
)
}
return nil
}
// Perflib: [29366] MSExchange RpcClientAccess
type perflibRPCClientAccess struct {
RPCAveragedLatency float64 `perflib:"RPC Averaged Latency"`
RPCRequests float64 `perflib:"RPC Requests"`
ActiveUserCount float64 `perflib:"Active User Count"`
ConnectionCount float64 `perflib:"Connection Count"`
RPCOperationsPerSec float64 `perflib:"RPC Operations/sec"`
UserCount float64 `perflib:"User Count"`
}
func (c *exchangeCollector) collectRPC(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibRPCClientAccess
if err := unmarshalObject(ctx.perfObjects["MSExchange RpcClientAccess"], &data, c.logger); err != nil {
return err
}
for _, rpc := range data {
ch <- prometheus.MustNewConstMetric(
c.RPCAveragedLatency,
prometheus.GaugeValue,
c.msToSec(rpc.RPCAveragedLatency),
)
ch <- prometheus.MustNewConstMetric(
c.RPCRequests,
prometheus.GaugeValue,
rpc.RPCRequests,
)
ch <- prometheus.MustNewConstMetric(
c.ActiveUserCount,
prometheus.GaugeValue,
rpc.ActiveUserCount,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionCount,
prometheus.GaugeValue,
rpc.ConnectionCount,
)
ch <- prometheus.MustNewConstMetric(
c.RPCOperationsPerSec,
prometheus.CounterValue,
rpc.RPCOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.UserCount,
prometheus.GaugeValue,
rpc.UserCount,
)
}
return nil
}
// Perflib: [20524] MSExchangeTransport Queues
type perflibTransportQueues struct {
Name string
ExternalActiveRemoteDeliveryQueueLength float64 `perflib:"External Active Remote Delivery Queue Length"`
InternalActiveRemoteDeliveryQueueLength float64 `perflib:"Internal Active Remote Delivery Queue Length"`
ActiveMailboxDeliveryQueueLength float64 `perflib:"Active Mailbox Delivery Queue Length"`
RetryMailboxDeliveryQueueLength float64 `perflib:"Retry Mailbox Delivery Queue Length"`
UnreachableQueueLength float64 `perflib:"Unreachable Queue Length"`
ExternalLargestDeliveryQueueLength float64 `perflib:"External Largest Delivery Queue Length"`
InternalLargestDeliveryQueueLength float64 `perflib:"Internal Largest Delivery Queue Length"`
PoisonQueueLength float64 `perflib:"Poison Queue Length"`
}
func (c *exchangeCollector) collectTransportQueues(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibTransportQueues
if err := unmarshalObject(ctx.perfObjects["MSExchangeTransport Queues"], &data, c.logger); err != nil {
return err
}
for _, queue := range data {
labelName := c.toLabelName(queue.Name)
if strings.HasSuffix(labelName, "_total") {
continue
}
ch <- prometheus.MustNewConstMetric(
c.ExternalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
queue.ExternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.InternalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
queue.InternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ActiveMailboxDeliveryQueueLength,
prometheus.GaugeValue,
queue.ActiveMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.RetryMailboxDeliveryQueueLength,
prometheus.GaugeValue,
queue.RetryMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.UnreachableQueueLength,
prometheus.GaugeValue,
queue.UnreachableQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ExternalLargestDeliveryQueueLength,
prometheus.GaugeValue,
queue.ExternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.InternalLargestDeliveryQueueLength,
prometheus.GaugeValue,
queue.InternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.PoisonQueueLength,
prometheus.GaugeValue,
queue.PoisonQueueLength,
labelName,
)
}
return nil
}
// Perflib: [19430] MSExchange WorkloadManagement Workloads
type perflibWorkloadManagementWorkloads struct {
Name string
ActiveTasks float64 `perflib:"ActiveTasks"`
CompletedTasks float64 `perflib:"CompletedTasks"`
QueuedTasks float64 `perflib:"QueuedTasks"`
YieldedTasks float64 `perflib:"YieldedTasks"`
IsActive float64 `perflib:"Active"`
}
func (c *exchangeCollector) collectWorkloadManagementWorkloads(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibWorkloadManagementWorkloads
if err := unmarshalObject(ctx.perfObjects["MSExchange WorkloadManagement Workloads"], &data, c.logger); err != nil {
return err
}
for _, instance := range data {
labelName := c.toLabelName(instance.Name)
if strings.HasSuffix(labelName, "_total") {
continue
}
ch <- prometheus.MustNewConstMetric(
c.ActiveTasks,
prometheus.GaugeValue,
instance.ActiveTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.CompletedTasks,
prometheus.CounterValue,
instance.CompletedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.QueuedTasks,
prometheus.CounterValue,
instance.QueuedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.YieldedTasks,
prometheus.CounterValue,
instance.YieldedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.IsActive,
prometheus.GaugeValue,
instance.IsActive,
labelName,
)
}
return nil
}
// Perflib: [29240] MSExchangeAutodiscover
type perflibAutodiscover struct {
RequestsPerSec float64 `perflib:"Requests/sec"`
}
func (c *exchangeCollector) collectAutoDiscover(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibAutodiscover
if err := unmarshalObject(ctx.perfObjects["MSExchangeAutodiscover"], &data, c.logger); err != nil {
return err
}
for _, autodisc := range data {
ch <- prometheus.MustNewConstMetric(
c.AutodiscoverRequestsPerSec,
prometheus.CounterValue,
autodisc.RequestsPerSec,
)
}
return nil
}
// toLabelName converts strings to lowercase and replaces all whitespace and dots with underscores
func (c *exchangeCollector) toLabelName(name string) string {
s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")
s = strings.ReplaceAll(s, "__", "_")
return s
}
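// c.toLabelName in action (hypothetical inputs):
//
//   c.toLabelName("MSExchange OWA")                -> "msexchange_owa"
//   c.toLabelName("w3wp.exe_MSExchangeOWAAppPool") -> "w3wp_exe_msexchangeowaapppool"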
// msToSec converts from ms to seconds
func (c *exchangeCollector) msToSec(t float64) float64 {
return t / 1000
}

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkExchangeCollector(b *testing.B) {
benchmarkCollector(b, "exchange", newExchangeCollector)
}

@@ -1,188 +0,0 @@
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
type FSRMQuotaCollector struct {
logger log.Logger
QuotasCount *prometheus.Desc
Path *prometheus.Desc
PeakUsage *prometheus.Desc
Size *prometheus.Desc
Usage *prometheus.Desc
Description *prometheus.Desc
Disabled *prometheus.Desc
MatchesTemplate *prometheus.Desc
SoftLimit *prometheus.Desc
Template *prometheus.Desc
}
func newFSRMQuotaCollector(logger log.Logger) (Collector, error) {
const subsystem = "fsrmquota"
return &FSRMQuotaCollector{
logger: log.With(logger, "collector", subsystem),
QuotasCount: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "count"),
"Number of Quotas",
nil,
nil,
),
PeakUsage: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "peak_usage_bytes"),
"The highest amount of disk space usage charged to this quota. (PeakUsage)",
[]string{"path", "template"},
nil,
),
Size: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "size_bytes"),
"The size of the quota. (Size)",
[]string{"path", "template"},
nil,
),
Usage: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "usage_bytes"),
"The current amount of disk space usage charged to this quota. (Usage)",
[]string{"path", "template"},
nil,
),
Description: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "description"),
"Description of the quota (Description)",
[]string{"path", "template", "description"},
nil,
),
Disabled: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "disabled"),
"If 1, the quota is disabled. The default value is 0. (Disabled)",
[]string{"path", "template"},
nil,
),
SoftLimit: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "softlimit"),
"If 1, the quota is a soft limit. If 0, the quota is a hard limit. The default value is 0. Optional (SoftLimit)",
[]string{"path", "template"},
nil,
),
Template: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "template"),
"Quota template name. (Template)",
[]string{"path", "template"},
nil,
),
MatchesTemplate: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "matchestemplate"),
"If 1, the property values of this quota match those values of the template from which it was derived. (MatchesTemplate)",
[]string{"path", "template"},
nil,
),
}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *FSRMQuotaCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting fsrmquota metrics", "desc", desc, "err", err)
return err
}
return nil
}
// MSFT_FSRMQuota docs:
// https://docs.microsoft.com/en-us/previous-versions/windows/desktop/fsrm/msft-fsrmquota
type MSFT_FSRMQuota struct {
Name string
Path string
PeakUsage uint64
Size uint64
Usage uint64
Description string
Template string
//Threshold string
Disabled bool
MatchesTemplate bool
SoftLimit bool
}
func (c *FSRMQuotaCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []MSFT_FSRMQuota
q := queryAll(&dst, c.logger)
var count int
if err := wmi.QueryNamespace(q, &dst, "root/microsoft/windows/fsrm"); err != nil {
return nil, err
}
for _, quota := range dst {
count++
path := quota.Path
template := quota.Template
description := quota.Description
ch <- prometheus.MustNewConstMetric(
c.PeakUsage,
prometheus.GaugeValue,
float64(quota.PeakUsage),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.Size,
prometheus.GaugeValue,
float64(quota.Size),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.Usage,
prometheus.GaugeValue,
float64(quota.Usage),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.Description,
prometheus.GaugeValue,
1.0,
path, template, description,
)
ch <- prometheus.MustNewConstMetric(
c.Disabled,
prometheus.GaugeValue,
boolToFloat(quota.Disabled),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.MatchesTemplate,
prometheus.GaugeValue,
boolToFloat(quota.MatchesTemplate),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.SoftLimit,
prometheus.GaugeValue,
boolToFloat(quota.SoftLimit),
path,
template,
)
}
ch <- prometheus.MustNewConstMetric(
c.QuotasCount,
prometheus.GaugeValue,
float64(count),
)
return nil, nil
}
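// A standalone sketch of querying the same WMI class outside the exporter,
// assuming github.com/yusufpapurcu/wmi; the namespace and field names are taken
// from the collector above, and the struct deliberately lists only the selected
// properties so the library can map them one-to-one.
type exampleFSRMQuota struct {
Path  string
Size  uint64
Usage uint64
}
func exampleListQuotas() ([]exampleFSRMQuota, error) {
var quotas []exampleFSRMQuota
q := "SELECT Path, Size, Usage FROM MSFT_FSRMQuota"
err := wmi.QueryNamespace(q, &quotas, "root/microsoft/windows/fsrm")
return quotas, err
}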

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkFsrmQuotaCollector(b *testing.B) {
benchmarkCollector(b, "fsrmquota", newFSRMQuotaCollector)
}

@@ -1,22 +1,21 @@
//go:build windows
// +build windows
package collector
import (
fmt "fmt"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("hyperv", NewHyperVCollector)
}
// HyperVCollector is a Prometheus collector for hyper-v
type HyperVCollector struct {
logger log.Logger
// Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
HealthCritical *prometheus.Desc
HealthOk *prometheus.Desc
@@ -53,11 +52,6 @@ type HyperVCollector struct {
LogicalProcessors *prometheus.Desc
VirtualProcessors *prometheus.Desc
// Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor
HostLPGuestRunTimePercent *prometheus.Desc
HostLPHypervisorRunTimePercent *prometheus.Desc
HostLPTotalRunTimePercent *prometheus.Desc
// Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor
HostGuestRunTime *prometheus.Desc
HostHypervisorRunTime *prometheus.Desc
@@ -116,27 +110,12 @@ type HyperVCollector struct {
VMNetworkDroppedPacketsOutgoing *prometheus.Desc
VMNetworkPacketsReceived *prometheus.Desc
VMNetworkPacketsSent *prometheus.Desc
// Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM
VMMemoryAddedMemory *prometheus.Desc
VMMemoryAveragePressure *prometheus.Desc
VMMemoryCurrentPressure *prometheus.Desc
VMMemoryGuestVisiblePhysicalMemory *prometheus.Desc
VMMemoryMaximumPressure *prometheus.Desc
VMMemoryMemoryAddOperations *prometheus.Desc
VMMemoryMemoryRemoveOperations *prometheus.Desc
VMMemoryMinimumPressure *prometheus.Desc
VMMemoryPhysicalMemory *prometheus.Desc
VMMemoryRemovedMemory *prometheus.Desc
}
// newHyperVCollector ...
func newHyperVCollector(logger log.Logger) (Collector, error) {
const subsystem = "hyperv"
// NewHyperVCollector ...
func NewHyperVCollector() (Collector, error) {
buildSubsystemName := func(component string) string { return "hyperv_" + component }
return &HyperVCollector{
logger: log.With(logger, "collector", subsystem),
HealthCritical: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("health"), "critical"),
"This counter represents the number of virtual machines with critical health",
@@ -317,27 +296,6 @@ func newHyperVCollector(logger log.Logger) (Collector, error) {
//
HostLPGuestRunTimePercent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("host_lp"), "guest_run_time_percent"),
"The percentage of time spent by the processor in guest code",
[]string{"core"},
nil,
),
HostLPHypervisorRunTimePercent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("host_lp"), "hypervisor_run_time_percent"),
"The percentage of time spent by the processor in hypervisor code",
[]string{"core"},
nil,
),
HostLPTotalRunTimePercent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("host_lp"), "total_run_time_percent"),
"The percentage of time spent by the processor in guest and hypervisor code",
[]string{"core"},
nil,
),
//
HostGuestRunTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("host_cpu"), "guest_run_time"),
"The time spent by the virtual processor in guest code",
@@ -634,69 +592,6 @@ func newHyperVCollector(logger log.Logger) (Collector, error) {
[]string{"vm_interface"},
nil,
),
//
VMMemoryAddedMemory: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "added_total"),
"This counter represents memory in MB added to the VM",
[]string{"vm"},
nil,
),
VMMemoryAveragePressure: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "pressure_average"),
"This gauge represents the average pressure in the VM.",
[]string{"vm"},
nil,
),
VMMemoryCurrentPressure: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "pressure_current"),
"This gauge represents the current pressure in the VM.",
[]string{"vm"},
nil,
),
VMMemoryGuestVisiblePhysicalMemory: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "physical_guest_visible"),
"This gauge represents the amount of memory in MB visible to the VM guest.",
[]string{"vm"},
nil,
),
VMMemoryMaximumPressure: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "pressure_maximum"),
"This gauge represents the maximum pressure band in the VM.",
[]string{"vm"},
nil,
),
VMMemoryMemoryAddOperations: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "add_operations_total"),
"This counter represents the number of operations adding memory to the VM.",
[]string{"vm"},
nil,
),
VMMemoryMemoryRemoveOperations: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "remove_operations_total"),
"This counter represents the number of operations removing memory from the VM.",
[]string{"vm"},
nil,
),
VMMemoryMinimumPressure: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "pressure_minimum"),
"This gauge represents the minimum pressure band in the VM.",
[]string{"vm"},
nil,
),
VMMemoryPhysicalMemory: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "physical"),
"This gauge represents the current amount of memory in MB assigned to the VM.",
[]string{"vm"},
nil,
),
VMMemoryRemovedMemory: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("vm_memory"), "removed_total"),
"This counter represents memory in MB removed from the VM",
[]string{"vm"},
nil,
),
}, nil
}
@@ -704,62 +599,52 @@ func newHyperVCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *HyperVCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectVmHealth(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV health status metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV health status metrics:", desc, err)
return err
}
if desc, err := c.collectVmVid(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV pages metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV pages metrics:", desc, err)
return err
}
if desc, err := c.collectVmHv(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV hv status metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV hv status metrics:", desc, err)
return err
}
if desc, err := c.collectVmProcessor(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV processor metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectHostLPUsage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host logical processors metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV processor metrics:", desc, err)
return err
}
if desc, err := c.collectHostCpuUsage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host CPU metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV host CPU metrics:", desc, err)
return err
}
if desc, err := c.collectVmCpuUsage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV VM CPU metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV VM CPU metrics:", desc, err)
return err
}
if desc, err := c.collectVmSwitch(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV switch metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV switch metrics:", desc, err)
return err
}
if desc, err := c.collectVmEthernet(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV ethernet metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV ethernet metrics:", desc, err)
return err
}
if desc, err := c.collectVmStorage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual storage metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV virtual storage metrics:", desc, err)
return err
}
if desc, err := c.collectVmNetwork(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual network metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmMemory(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual memory metrics", "desc", desc, "err", err)
log.Error("failed collecting hyperV virtual network metrics:", desc, err)
return err
}
@@ -774,7 +659,7 @@ type Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
func (c *HyperVCollector) collectVmHealth(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -807,7 +692,7 @@ type Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition struct {
func (c *HyperVCollector) collectVmVid(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -871,7 +756,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition struct {
func (c *HyperVCollector) collectVmHv(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1009,7 +894,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisor struct {
func (c *HyperVCollector) collectVmProcessor(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisor
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1033,59 +918,6 @@ func (c *HyperVCollector) collectVmProcessor(ch chan<- prometheus.Metric) (*prom
return nil, nil
}
// Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor ...
type Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor struct {
Name string
PercentGuestRunTime uint64
PercentHypervisorRunTime uint64
PercentTotalRunTime uint
}
func (c *HyperVCollector) collectHostLPUsage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
for _, obj := range dst {
if strings.Contains(obj.Name, "_Total") {
continue
}
// The name format is Hv LP <core id>
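// e.g. a Name of "Hv LP 3" (hypothetical sample) splits into ["Hv", "LP", "3"], so coreId is "3"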
parts := strings.Split(obj.Name, " ")
if len(parts) != 3 {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectHostLPUsage: %q", obj.Name))
continue
}
coreId := parts[2]
ch <- prometheus.MustNewConstMetric(
c.HostLPGuestRunTimePercent,
prometheus.GaugeValue,
float64(obj.PercentGuestRunTime),
coreId,
)
ch <- prometheus.MustNewConstMetric(
c.HostLPHypervisorRunTimePercent,
prometheus.GaugeValue,
float64(obj.PercentHypervisorRunTime),
coreId,
)
ch <- prometheus.MustNewConstMetric(
c.HostLPTotalRunTimePercent,
prometheus.GaugeValue,
float64(obj.PercentTotalRunTime),
coreId,
)
}
return nil, nil
}
// Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor ...
type Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor struct {
Name string
@@ -1097,7 +929,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor struct {
func (c *HyperVCollector) collectHostCpuUsage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1109,7 +941,7 @@ func (c *HyperVCollector) collectHostCpuUsage(ch chan<- prometheus.Metric) (*pro
// The name format is Root VP <core id>
parts := strings.Split(obj.Name, " ")
if len(parts) != 3 {
_ = level.Warn(c.logger).Log("msg", "Unexpected format of Name in collectHostCpuUsage: "+obj.Name)
log.Warnf("Unexpected format of Name in collectHostCpuUsage: %q", obj.Name)
continue
}
coreId := parts[2]
@@ -1158,7 +990,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor struct {
func (c *HyperVCollector) collectVmCpuUsage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1169,17 +1001,8 @@ func (c *HyperVCollector) collectVmCpuUsage(ch chan<- prometheus.Metric) (*prome
}
// The name format is <VM Name>:Hv VP <vcore id>
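// e.g. a Name of "web01:Hv VP 0" (hypothetical sample) yields vmName "web01" and coreId "0"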
parts := strings.Split(obj.Name, ":")
if len(parts) != 2 {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectVmCpuUsage: %q, expected %q. Skipping.", obj.Name, "<VM Name>:Hv VP <vcore id>"))
continue
}
coreParts := strings.Split(parts[1], " ")
if len(coreParts) != 3 {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of core identifier in collectVmCpuUsage: %q, expected %q. Skipping.", parts[1], "Hv VP <vcore id>"))
continue
}
vmName := parts[0]
coreId := coreParts[2]
coreId := strings.Split(parts[1], " ")[2]
ch <- prometheus.MustNewConstMetric(
c.VMGuestRunTime,
@@ -1245,7 +1068,7 @@ type Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch struct {
func (c *HyperVCollector) collectVmSwitch(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1380,12 +1203,6 @@ func (c *HyperVCollector) collectVmSwitch(ch chan<- prometheus.Metric) (*prometh
float64(obj.PacketsReceivedPersec),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsSent,
prometheus.CounterValue,
float64(obj.PacketsSentPersec),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PurgedMacAddresses,
prometheus.CounterValue,
@@ -1410,7 +1227,7 @@ type Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter struct {
func (c *HyperVCollector) collectVmEthernet(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1480,7 +1297,7 @@ type Win32_PerfRawData_Counters_HyperVVirtualStorageDevice struct {
func (c *HyperVCollector) collectVmStorage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_Counters_HyperVVirtualStorageDevice
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1549,7 +1366,7 @@ type Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter struct {
func (c *HyperVCollector) collectVmNetwork(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1604,104 +1421,3 @@ func (c *HyperVCollector) collectVmNetwork(ch chan<- prometheus.Metric) (*promet
return nil, nil
}
// Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM ...
type Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM struct {
Name string
AddedMemory uint64
AveragePressure uint64
CurrentPressure uint64
GuestVisiblePhysicalMemory uint64
MaximumPressure uint64
MemoryAddOperations uint64
MemoryRemoveOperations uint64
MinimumPressure uint64
PhysicalMemory uint64
RemovedMemory uint64
}
func (c *HyperVCollector) collectVmMemory(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
for _, obj := range dst {
if strings.Contains(obj.Name, "_Total") {
continue
}
ch <- prometheus.MustNewConstMetric(
c.VMMemoryAddedMemory,
prometheus.CounterValue,
float64(obj.AddedMemory),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VMMemoryAveragePressure,
prometheus.GaugeValue,
float64(obj.AveragePressure),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VMMemoryCurrentPressure,
prometheus.GaugeValue,
float64(obj.CurrentPressure),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VMMemoryGuestVisiblePhysicalMemory,
prometheus.GaugeValue,
float64(obj.GuestVisiblePhysicalMemory),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VMMemoryMaximumPressure,
prometheus.GaugeValue,
float64(obj.MaximumPressure),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VMMemoryMemoryAddOperations,
prometheus.CounterValue,
float64(obj.MemoryAddOperations),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VMMemoryMemoryRemoveOperations,
prometheus.CounterValue,
float64(obj.MemoryRemoveOperations),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VMMemoryMinimumPressure,
prometheus.GaugeValue,
float64(obj.MinimumPressure),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VMMemoryPhysicalMemory,
prometheus.GaugeValue,
float64(obj.PhysicalMemory),
obj.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VMMemoryRemovedMemory,
prometheus.CounterValue,
float64(obj.RemovedMemory),
obj.Name,
)
}
return nil, nil
}

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkHypervCollector(b *testing.B) {
benchmarkCollector(b, "hyperv", newHyperVCollector)
}

File diff suppressed because it is too large

View File

@@ -1,49 +0,0 @@
package collector
import (
"reflect"
"testing"
)
func BenchmarkIISCollector(b *testing.B) {
benchmarkCollector(b, "iis", newIISCollector)
}
func TestIISDeduplication(t *testing.T) {
start := []perflibAPP_POOL_WAS{
{
Name: "foo",
Frequency_Object: 1,
},
{
Name: "foo1#999",
Frequency_Object: 2,
},
{
Name: "foo#2",
Frequency_Object: 3,
},
{
Name: "bar$2",
Frequency_Object: 4,
},
{
Name: "bar_2",
Frequency_Object: 5,
},
}
var expected = make(map[string]perflibAPP_POOL_WAS)
// Should be deduplicated from "foo#2"
expected["foo"] = perflibAPP_POOL_WAS{Name: "foo#2", Frequency_Object: 3}
// Map key should have suffix stripped, but struct name field should be unchanged
expected["foo1"] = perflibAPP_POOL_WAS{Name: "foo1#999", Frequency_Object: 2}
// Map key and Name should be identical, as there is no suffix starting with "#"
expected["bar$2"] = perflibAPP_POOL_WAS{Name: "bar$2", Frequency_Object: 4}
// Map key and Name should be identical, as there is no suffix starting with "#"
expected["bar_2"] = perflibAPP_POOL_WAS{Name: "bar_2", Frequency_Object: 5}
deduplicated := dedupIISNames(start)
if !reflect.DeepEqual(expected, deduplicated) {
t.Errorf("Flattened values do not match!\nExpected result: %+v\nActual result: %+v", expected, deduplicated)
}
}
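The expectations above pin down the deduplication contract: the map key is the pool name with any "#<n>" instance suffix stripped, the struct's Name field is kept verbatim, and later entries overwrite earlier ones. A minimal sketch of dedupIISNames consistent with that contract (the real implementation lives in the IIS collector):

func dedupIISNames(apps []perflibAPP_POOL_WAS) map[string]perflibAPP_POOL_WAS {
	deduped := make(map[string]perflibAPP_POOL_WAS, len(apps))
	for _, app := range apps {
		key := app.Name
		// strip a "#<n>" instance suffix for the map key only
		if i := strings.Index(app.Name, "#"); i != -1 {
			key = app.Name[:i]
		}
		deduped[key] = app // later duplicates win
	}
	return deduped
}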

View File

@@ -1,415 +0,0 @@
package collector
import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
)
// collectorInit represents the required initialisation config for a collector.
type collectorInit struct {
// Name of collector to be initialised
name string
// Builder function for the collector's flags
flags flagsBuilder
// Builder function for the collector
builder collectorBuilder
// Perflib counter names for the collector.
// These will be included in the Perflib scrape scope by the exporter.
perfCounterFunc perfCounterNamesBuilder
}
func getDFSRCollectorDeps(_ log.Logger) []string {
// Perflib sources are dynamic, depending on the enabled child collectors
var perflibDependencies []string
for _, source := range expandEnabledChildCollectors(*dfsrEnabledCollectors) {
perflibDependencies = append(perflibDependencies, dfsrGetPerfObjectName(source))
}
return perflibDependencies
}
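expandEnabledChildCollectors is defined alongside the DFSR collector; a minimal sketch of the assumed behaviour (split the comma-separated flag value, dropping empties and duplicates):

func expandEnabledChildCollectors(enabled string) []string {
	// sketch: the flag value is assumed to be a comma-separated list
	// such as "connection,folder,volume"
	seen := make(map[string]bool)
	var expanded []string
	for _, c := range strings.Split(enabled, ",") {
		if c != "" && !seen[c] {
			seen[c] = true
			expanded = append(expanded, c)
		}
	}
	return expanded
}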
var collectors = []collectorInit{
{
name: "ad",
flags: nil,
builder: newADCollector,
perfCounterFunc: nil,
},
{
name: "adcs",
flags: nil,
builder: adcsCollectorMethod,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Certification Authority"}
},
},
{
name: "adfs",
flags: nil,
builder: newADFSCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"AD FS"}
},
},
{
name: "cache",
flags: nil,
builder: newCacheCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Cache"}
},
},
{
name: "container",
flags: nil,
builder: newContainerMetricsCollector,
perfCounterFunc: nil,
},
{
name: "cpu",
flags: nil,
builder: newCPUCollector,
perfCounterFunc: func(logger log.Logger) []string {
if getWindowsVersion(logger) > 6.05 {
return []string{"Processor Information"}
}
return []string{"Processor"}
},
},
{
name: "cpu_info",
flags: nil,
builder: newCpuInfoCollector,
perfCounterFunc: nil,
},
{
name: "cs",
flags: nil,
builder: newCSCollector,
perfCounterFunc: nil,
},
{
name: "dfsr",
flags: newDFSRCollectorFlags,
builder: newDFSRCollector,
perfCounterFunc: getDFSRCollectorDeps,
},
{
name: "dhcp",
flags: nil,
builder: newDhcpCollector,
perfCounterFunc: nil,
},
{
name: "diskdrive",
flags: nil,
builder: newDiskDriveInfoCollector,
perfCounterFunc: nil,
},
{
name: "dns",
flags: nil,
builder: newDNSCollector,
perfCounterFunc: nil,
},
{
name: "exchange",
flags: newExchangeCollectorFlags,
builder: newExchangeCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{
"MSExchange ADAccess Processes",
"MSExchangeTransport Queues",
"MSExchange HttpProxy",
"MSExchange ActiveSync",
"MSExchange Availability Service",
"MSExchange OWA",
"MSExchangeAutodiscover",
"MSExchange WorkloadManagement Workloads",
"MSExchange RpcClientAccess",
}
},
},
{
name: "fsrmquota",
flags: nil,
builder: newFSRMQuotaCollector,
perfCounterFunc: nil,
},
{
name: "hyperv",
flags: nil,
builder: newHyperVCollector,
perfCounterFunc: nil,
},
{
name: "iis",
flags: newIISCollectorFlags,
builder: newIISCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{
"Web Service",
"APP_POOL_WAS",
"Web Service Cache",
"W3SVC_W3WP",
}
},
},
{
name: "logical_disk",
flags: newLogicalDiskCollectorFlags,
builder: newLogicalDiskCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"LogicalDisk"}
},
},
{
name: "logon",
flags: nil,
builder: newLogonCollector,
perfCounterFunc: nil,
},
{
name: "memory",
flags: nil,
builder: newMemoryCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Memory"}
},
},
{
name: "mscluster_cluster",
flags: nil,
builder: newMSCluster_ClusterCollector,
perfCounterFunc: nil,
},
{
name: "mscluster_network",
flags: nil,
builder: newMSCluster_NetworkCollector,
perfCounterFunc: nil,
},
{
name: "mscluster_node",
flags: nil,
builder: newMSCluster_NodeCollector,
perfCounterFunc: nil,
},
{
name: "mscluster_resource",
flags: nil,
builder: newMSCluster_ResourceCollector,
perfCounterFunc: nil,
},
{
name: "mscluster_resourcegroup",
flags: nil,
builder: newMSCluster_ResourceGroupCollector,
perfCounterFunc: nil,
},
{
name: "msmq",
flags: newMSMQCollectorFlags,
builder: newMSMQCollector,
perfCounterFunc: nil,
},
{
name: "mssql",
flags: newMSSQLCollectorFlags,
builder: newMSSQLCollector,
perfCounterFunc: nil,
},
{
name: "net",
flags: newNetworkCollectorFlags,
builder: newNetworkCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Network Interface"}
},
},
{
name: "netframework_clrexceptions",
flags: nil,
builder: newNETFramework_NETCLRExceptionsCollector,
perfCounterFunc: nil,
},
{
name: "netframework_clrinterop",
flags: nil,
builder: newNETFramework_NETCLRInteropCollector,
perfCounterFunc: nil,
},
{
name: "netframework_clrjit",
flags: nil,
builder: newNETFramework_NETCLRJitCollector,
perfCounterFunc: nil,
},
{
name: "netframework_clrloading",
flags: nil,
builder: newNETFramework_NETCLRLoadingCollector,
perfCounterFunc: nil,
},
{
name: "netframework_clrlocksandthreads",
flags: nil,
builder: newNETFramework_NETCLRLocksAndThreadsCollector,
perfCounterFunc: nil,
},
{
name: "netframework_clrmemory",
flags: nil,
builder: newNETFramework_NETCLRMemoryCollector,
perfCounterFunc: nil,
},
{
name: "netframework_clrremoting",
flags: nil,
builder: newNETFramework_NETCLRRemotingCollector,
perfCounterFunc: nil,
},
{
name: "netframework_clrsecurity",
flags: nil,
builder: newNETFramework_NETCLRSecurityCollector,
perfCounterFunc: nil,
},
{
name: "nps",
flags: nil,
builder: newNPSCollector,
perfCounterFunc: nil,
},
{
name: "os",
flags: nil,
builder: newOSCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Paging File"}
},
},
{
name: "process",
flags: newProcessCollectorFlags,
builder: newProcessCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Process"}
},
},
{
name: "remote_fx",
flags: nil,
builder: newRemoteFx,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"RemoteFX Network"}
},
},
{
name: "scheduled_task",
flags: newScheduledTaskFlags,
builder: newScheduledTask,
perfCounterFunc: nil,
},
{
name: "service",
flags: newServiceCollectorFlags,
builder: newserviceCollector,
perfCounterFunc: nil,
},
{
name: "smtp",
flags: newSMTPCollectorFlags,
builder: newSMTPCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"SMTP Server"}
},
},
{
name: "system",
flags: nil,
builder: newSystemCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"System"}
},
},
{
name: "teradici_pcoip",
flags: nil,
builder: newTeradiciPcoipCollector,
perfCounterFunc: nil,
},
{
name: "tcp",
flags: nil,
builder: newTCPCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"TCPv4"}
},
},
{
name: "terminal_services",
flags: nil,
builder: newTerminalServicesCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{
"Terminal Services",
"Terminal Services Session",
"Remote Desktop Connection Broker Counterset",
}
},
},
{
name: "textfile",
flags: newTextFileCollectorFlags,
builder: newTextFileCollector,
perfCounterFunc: nil,
},
{
name: "thermalzone",
flags: nil,
builder: newThermalZoneCollector,
perfCounterFunc: nil,
},
{
name: "time",
flags: nil,
builder: newTimeCollector,
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Windows Time Service"}
},
},
{
name: "vmware",
flags: nil,
builder: newVmwareCollector,
perfCounterFunc: nil,
},
{
name: "vmware_blast",
flags: nil,
builder: newVmwareBlastCollector,
perfCounterFunc: nil,
},
}
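The flags, builder and perfCounterFunc fields refer to function types declared elsewhere in the package; their signatures, sketched from how they are used in this file:

// assumed declarations, consistent with the call sites in this file
type flagsBuilder func(*kingpin.Application)
type collectorBuilder func(log.Logger) (Collector, error)
type perfCounterNamesBuilder func(log.Logger) []string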
// RegisterCollectorsFlags To be called by the exporter for collector initialisation before running app.Parse
func RegisterCollectorsFlags(app *kingpin.Application) {
for _, v := range collectors {
if v.flags != nil {
v.flags(app)
}
}
}
// RegisterCollectors To be called by the exporter for collector initialisation
func RegisterCollectors(logger log.Logger) {
for _, v := range collectors {
var perfCounterNames []string
if v.perfCounterFunc != nil {
perfCounterNames = v.perfCounterFunc(logger)
}
registerCollector(v.name, v.builder, perfCounterNames...)
}
}
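A hypothetical wiring of these two entry points in an exporter's main, assuming kingpin parsing happens between the calls so that per-collector flags are populated before any builder runs:

app := kingpin.New("windows_exporter", "A Prometheus exporter for Windows machines.")
collector.RegisterCollectorsFlags(app) // declare per-collector flags
kingpin.MustParse(app.Parse(os.Args[1:]))
logger := log.NewNopLogger() // stand-in; the exporter builds a real logger
collector.RegisterCollectors(logger) // register builders against parsed flags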

View File

@@ -1,45 +1,34 @@
//go:build windows
// +build windows
package collector
import (
"errors"
"fmt"
"regexp"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
FlagLogicalDiskVolumeOldExclude = "collector.logical_disk.volume-blacklist"
FlagLogicalDiskVolumeOldInclude = "collector.logical_disk.volume-whitelist"
FlagLogicalDiskVolumeExclude = "collector.logical_disk.volume-exclude"
FlagLogicalDiskVolumeInclude = "collector.logical_disk.volume-include"
)
func init() {
registerCollector("logical_disk", NewLogicalDiskCollector, "LogicalDisk")
}
var (
volumeOldInclude *string
volumeOldExclude *string
volumeInclude *string
volumeExclude *string
volumeIncludeSet bool
volumeExcludeSet bool
volumeWhitelist = kingpin.Flag(
"collector.logical_disk.volume-whitelist",
"Regexp of volumes to whitelist. Volume name must both match whitelist and not match blacklist to be included.",
).Default(".+").String()
volumeBlacklist = kingpin.Flag(
"collector.logical_disk.volume-blacklist",
"Regexp of volumes to blacklist. Volume name must both match whitelist and not match blacklist to be included.",
).Default("").String()
)
// A LogicalDiskCollector is a Prometheus collector for perflib logicalDisk metrics
type LogicalDiskCollector struct {
logger log.Logger
RequestsQueued *prometheus.Desc
AvgReadQueue *prometheus.Desc
AvgWriteQueue *prometheus.Desc
ReadBytesTotal *prometheus.Desc
ReadsTotal *prometheus.Desc
WriteBytesTotal *prometheus.Desc
@@ -54,63 +43,15 @@ type LogicalDiskCollector struct {
WriteLatency *prometheus.Desc
ReadWriteLatency *prometheus.Desc
volumeIncludePattern *regexp.Regexp
volumeExcludePattern *regexp.Regexp
volumeWhitelistPattern *regexp.Regexp
volumeBlacklistPattern *regexp.Regexp
}
// newLogicalDiskCollectorFlags ...
func newLogicalDiskCollectorFlags(app *kingpin.Application) {
volumeInclude = app.Flag(
FlagLogicalDiskVolumeInclude,
"Regexp of volumes to include. Volume name must both match include and not match exclude to be included.",
).Default(".+").PreAction(func(c *kingpin.ParseContext) error {
volumeIncludeSet = true
return nil
}).String()
volumeExclude = app.Flag(
FlagLogicalDiskVolumeExclude,
"Regexp of volumes to exclude. Volume name must both match include and not match exclude to be included.",
).Default("").PreAction(func(c *kingpin.ParseContext) error {
volumeExcludeSet = true
return nil
}).String()
volumeOldInclude = app.Flag(
FlagLogicalDiskVolumeOldInclude,
"DEPRECATED: Use --collector.logical_disk.volume-include",
).Hidden().String()
volumeOldExclude = app.Flag(
FlagLogicalDiskVolumeOldExclude,
"DEPRECATED: Use --collector.logical_disk.volume-exclude",
).Hidden().String()
}
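For example, a hypothetical invocation using the new flag names (the collector anchors both regexps, so values must match whole volume names):

windows_exporter.exe --collector.logical_disk.volume-include="C:|D:" --collector.logical_disk.volume-exclude="HarddiskVolume.+"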
// newLogicalDiskCollector ...
func newLogicalDiskCollector(logger log.Logger) (Collector, error) {
// NewLogicalDiskCollector ...
func NewLogicalDiskCollector() (Collector, error) {
const subsystem = "logical_disk"
logger = log.With(logger, "collector", subsystem)
if *volumeOldExclude != "" {
if !volumeExcludeSet {
_ = level.Warn(logger).Log("msg", "--collector.logical_disk.volume-blacklist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-exclude")
*volumeExclude = *volumeOldExclude
} else {
return nil, errors.New("--collector.logical_disk.volume-blacklist and --collector.logical_disk.volume-exclude are mutually exclusive")
}
}
if *volumeOldInclude != "" {
if !volumeIncludeSet {
_ = level.Warn(logger).Log("msg", "--collector.logical_disk.volume-whitelist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-include")
*volumeInclude = *volumeOldInclude
} else {
return nil, errors.New("--collector.logical_disk.volume-whitelist and --collector.logical_disk.volume-include are mutually exclusive")
}
}
return &LogicalDiskCollector{
logger: logger,
RequestsQueued: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "requests_queued"),
"The number of requests queued to the disk (LogicalDisk.CurrentDiskQueueLength)",
@@ -118,20 +59,6 @@ func newLogicalDiskCollector(logger log.Logger) (Collector, error) {
nil,
),
AvgReadQueue: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "avg_read_requests_queued"),
"Average number of read requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskReadQueueLength)",
[]string{"volume"},
nil,
),
AvgWriteQueue: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "avg_write_requests_queued"),
"Average number of write requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskWriteQueueLength)",
[]string{"volume"},
nil,
),
ReadBytesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "read_bytes_total"),
"The number of bytes transferred from the disk during read operations (LogicalDisk.DiskReadBytesPerSec)",
@@ -176,14 +103,14 @@ func newLogicalDiskCollector(logger log.Logger) (Collector, error) {
FreeSpace: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "free_bytes"),
"Free space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace)",
"Free space in bytes (LogicalDisk.PercentFreeSpace)",
[]string{"volume"},
nil,
),
TotalSpace: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "size_bytes"),
"Total space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace_Base)",
"Total space in bytes (LogicalDisk.PercentFreeSpace_Base)",
[]string{"volume"},
nil,
),
@@ -223,8 +150,8 @@ func newLogicalDiskCollector(logger log.Logger) (Collector, error) {
nil,
),
volumeIncludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeInclude)),
volumeExcludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeExclude)),
volumeWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeWhitelist)),
volumeBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeBlacklist)),
}, nil
}
@@ -232,7 +159,7 @@ func newLogicalDiskCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting logical_disk metrics", "desc", desc, "err", err)
log.Error("failed collecting logical_disk metrics:", desc, err)
return err
}
return nil
@@ -242,35 +169,33 @@ func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
type logicalDisk struct {
Name string
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
AvgDiskReadQueueLength float64 `perflib:"Avg. Disk Read Queue Length"`
AvgDiskWriteQueueLength float64 `perflib:"Avg. Disk Write Queue Length"`
DiskReadBytesPerSec float64 `perflib:"Disk Read Bytes/sec"`
DiskReadsPerSec float64 `perflib:"Disk Reads/sec"`
DiskWriteBytesPerSec float64 `perflib:"Disk Write Bytes/sec"`
DiskWritesPerSec float64 `perflib:"Disk Writes/sec"`
PercentDiskReadTime float64 `perflib:"% Disk Read Time"`
PercentDiskWriteTime float64 `perflib:"% Disk Write Time"`
PercentFreeSpace float64 `perflib:"% Free Space_Base"`
PercentFreeSpace_Base float64 `perflib:"Free Megabytes"`
PercentIdleTime float64 `perflib:"% Idle Time"`
SplitIOPerSec float64 `perflib:"Split IO/Sec"`
AvgDiskSecPerRead float64 `perflib:"Avg. Disk sec/Read"`
AvgDiskSecPerWrite float64 `perflib:"Avg. Disk sec/Write"`
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
Name string
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
DiskReadBytesPerSec float64 `perflib:"Disk Read Bytes/sec"`
DiskReadsPerSec float64 `perflib:"Disk Reads/sec"`
DiskWriteBytesPerSec float64 `perflib:"Disk Write Bytes/sec"`
DiskWritesPerSec float64 `perflib:"Disk Writes/sec"`
PercentDiskReadTime float64 `perflib:"% Disk Read Time"`
PercentDiskWriteTime float64 `perflib:"% Disk Write Time"`
PercentFreeSpace float64 `perflib:"% Free Space_Base"`
PercentFreeSpace_Base float64 `perflib:"Free Megabytes"`
PercentIdleTime float64 `perflib:"% Idle Time"`
SplitIOPerSec float64 `perflib:"Split IO/Sec"`
AvgDiskSecPerRead float64 `perflib:"Avg. Disk sec/Read"`
AvgDiskSecPerWrite float64 `perflib:"Avg. Disk sec/Write"`
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
}
func (c *LogicalDiskCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []logicalDisk
if err := unmarshalObject(ctx.perfObjects["LogicalDisk"], &dst, c.logger); err != nil {
if err := unmarshalObject(ctx.perfObjects["LogicalDisk"], &dst); err != nil {
return nil, err
}
for _, volume := range dst {
if volume.Name == "_Total" ||
c.volumeExcludePattern.MatchString(volume.Name) ||
!c.volumeIncludePattern.MatchString(volume.Name) {
c.volumeBlacklistPattern.MatchString(volume.Name) ||
!c.volumeWhitelistPattern.MatchString(volume.Name) {
continue
}
@@ -281,20 +206,6 @@ func (c *LogicalDiskCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.AvgReadQueue,
prometheus.GaugeValue,
volume.AvgDiskReadQueueLength*ticksToSecondsScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.AvgWriteQueue,
prometheus.GaugeValue,
volume.AvgDiskWriteQueueLength*ticksToSecondsScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadBytesTotal,
prometheus.CounterValue,

View File

@@ -1,13 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkLogicalDiskCollector(b *testing.B) {
// The volume include pattern is not set in the testing context (kingpin flags are not parsed), which would cause the collector to skip all disks.
localVolumeInclude := ".+"
volumeInclude = &localVolumeInclude
benchmarkCollector(b, "logical_disk", newLogicalDiskCollector)
}

View File

@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package collector
@@ -6,25 +5,25 @@ package collector
import (
"errors"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("logon", NewLogonCollector)
}
// A LogonCollector is a Prometheus collector for WMI Win32_LogonSession metrics
type LogonCollector struct {
logger log.Logger
LogonType *prometheus.Desc
}
// newLogonCollector ...
func newLogonCollector(logger log.Logger) (Collector, error) {
// NewLogonCollector ...
func NewLogonCollector() (Collector, error) {
const subsystem = "logon"
return &LogonCollector{
logger: log.With(logger, "collector", subsystem),
LogonType: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "logon_type"),
"Number of active logon sessions (LogonSession.LogonType)",
@@ -38,7 +37,7 @@ func newLogonCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *LogonCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
log.Error("failed collecting user metrics:", desc, err)
return err
}
return nil
@@ -52,7 +51,7 @@ type Win32_LogonSession struct {
func (c *LogonCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_LogonSession
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkLogonCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newLogonCollector)
}

View File

@@ -1,21 +1,21 @@
// returns data points from Win32_PerfRawData_PerfOS_Memory
// <add link to documentation here> - Win32_PerfRawData_PerfOS_Memory class
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("memory", NewMemoryCollector, "Memory")
}
// A MemoryCollector is a Prometheus collector for perflib Memory metrics
type MemoryCollector struct {
logger log.Logger
AvailableBytes *prometheus.Desc
CacheBytes *prometheus.Desc
CacheBytesPeak *prometheus.Desc
@@ -50,13 +50,11 @@ type MemoryCollector struct {
WriteCopiesTotal *prometheus.Desc
}
// newMemoryCollector ...
func newMemoryCollector(logger log.Logger) (Collector, error) {
// NewMemoryCollector ...
func NewMemoryCollector() (Collector, error) {
const subsystem = "memory"
return &MemoryCollector{
logger: log.With(logger, "collector", subsystem),
AvailableBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "available_bytes"),
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
@@ -78,8 +76,7 @@ func newMemoryCollector(logger log.Logger) (Collector, error) {
),
CacheFaultsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cache_faults_total"),
"Number of faults which occur when a page sought in the file system cache is not found there and must be retrieved from elsewhere in memory (soft fault) "+
"or from disk (hard fault) (Cache Faults/sec)",
"(CacheFaultsPersec)",
nil,
nil,
),
@@ -98,14 +95,13 @@ func newMemoryCollector(logger log.Logger) (Collector, error) {
DemandZeroFaultsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "demand_zero_faults_total"),
"The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security"+
" feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space (Demand Zero Faults/sec)",
" feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space (DemandZeroFaults)",
nil,
nil,
),
FreeAndZeroPageListBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "free_and_zero_page_list_bytes"),
"The amount of physical memory, in bytes, that is assigned to the free and zero page lists. This memory does not contain cached data. It is immediately"+
" available for allocation to a process or for system use (FreeAndZeroPageListBytes)",
"(FreeAndZeroPageListBytes)",
nil,
nil,
),
@@ -117,14 +113,13 @@ func newMemoryCollector(logger log.Logger) (Collector, error) {
),
ModifiedPageListBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "modified_page_list_bytes"),
"The amount of physical memory, in bytes, that is assigned to the modified page list. This memory contains cached data and code that is not actively in "+
"use by processes, the system and the system cache (ModifiedPageListBytes)",
"(ModifiedPageListBytes)",
nil,
nil,
),
PageFaultsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "page_faults_total"),
"Overall rate at which faulted pages are handled by the processor (Page Faults/sec)",
"(PageFaultsPersec)",
nil,
nil,
),
@@ -166,15 +161,14 @@ func newMemoryCollector(logger log.Logger) (Collector, error) {
nil,
),
PoolNonpagedBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pool_nonpaged_bytes"),
"Number of bytes in the non-paged pool, an area of the system virtual memory that is used for objects that cannot be written to disk, but must "+
"remain in physical memory as long as they are allocated (PoolNonpagedBytes)",
prometheus.BuildFQName(Namespace, subsystem, "pool_nonpaged_bytes_total"),
"(PoolNonpagedBytes)",
nil,
nil,
),
PoolPagedAllocsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_allocs_total"),
"Number of calls to allocate space in the paged pool, regardless of the amount of space allocated in each call (PoolPagedAllocs)",
"(PoolPagedAllocs)",
nil,
nil,
),
@@ -186,72 +180,67 @@ func newMemoryCollector(logger log.Logger) (Collector, error) {
),
PoolPagedResidentBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_resident_bytes"),
"The size, in bytes, of the portion of the paged pool that is currently resident and active in physical memory. The paged pool is an area of the "+
"system virtual memory that is used for objects that can be written to disk when they are not being used (PoolPagedResidentBytes)",
"(PoolPagedResidentBytes)",
nil,
nil,
),
StandbyCacheCoreBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_core_bytes"),
"The amount of physical memory, in bytes, that is assigned to the core standby cache page lists. This memory contains cached data and code that is "+
"not actively in use by processes, the system and the system cache (StandbyCacheCoreBytes)",
"(StandbyCacheCoreBytes)",
nil,
nil,
),
StandbyCacheNormalPriorityBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_normal_priority_bytes"),
"The amount of physical memory, in bytes, that is assigned to the normal priority standby cache page lists. This memory contains cached data and "+
"code that is not actively in use by processes, the system and the system cache (StandbyCacheNormalPriorityBytes)",
"(StandbyCacheNormalPriorityBytes)",
nil,
nil,
),
StandbyCacheReserveBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_reserve_bytes"),
"The amount of physical memory, in bytes, that is assigned to the reserve standby cache page lists. This memory contains cached data and code "+
"that is not actively in use by processes, the system and the system cache (StandbyCacheReserveBytes)",
"(StandbyCacheReserveBytes)",
nil,
nil,
),
SystemCacheResidentBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "system_cache_resident_bytes"),
"The size, in bytes, of the portion of the system file cache which is currently resident and active in physical memory (SystemCacheResidentBytes)",
"(SystemCacheResidentBytes)",
nil,
nil,
),
SystemCodeResidentBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "system_code_resident_bytes"),
"The size, in bytes, of the pageable operating system code that is currently resident and active in physical memory (SystemCodeResidentBytes)",
"(SystemCodeResidentBytes)",
nil,
nil,
),
SystemCodeTotalBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "system_code_total_bytes"),
"The size, in bytes, of the pageable operating system code currently mapped into the system virtual address space (SystemCodeTotalBytes)",
"(SystemCodeTotalBytes)",
nil,
nil,
),
SystemDriverResidentBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "system_driver_resident_bytes"),
"The size, in bytes, of the pageable physical memory being used by device drivers. It is the working set (physical memory area) of the drivers (SystemDriverResidentBytes)",
"(SystemDriverResidentBytes)",
nil,
nil,
),
SystemDriverTotalBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "system_driver_total_bytes"),
"The size, in bytes, of the pageable virtual memory currently being used by device drivers. Pageable memory can be written to disk when it is not being used (SystemDriverTotalBytes)",
"(SystemDriverTotalBytes)",
nil,
nil,
),
TransitionFaultsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "transition_faults_total"),
"Rate at which page faults are resolved by recovering pages that were being used by another process sharing the page, or were on the "+
"modified page list or the standby list, or were being written to disk at the time of the page fault (TransitionFaultsPersec)",
"(TransitionFaultsPersec)",
nil,
nil,
),
TransitionPagesRepurposedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "transition_pages_repurposed_total"),
"Transition Pages RePurposed is the rate at which the number of transition cache pages were reused for a different purpose (TransitionPagesRePurposedPersec)",
"(TransitionPagesRePurposedPersec)",
nil,
nil,
),
@@ -268,7 +257,7 @@ func newMemoryCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting memory metrics", "desc", desc, "err", err)
log.Error("failed collecting memory metrics:", desc, err)
return err
}
return nil
@@ -313,7 +302,7 @@ type memory struct {
func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []memory
if err := unmarshalObject(ctx.perfObjects["Memory"], &dst, c.logger); err != nil {
if err := unmarshalObject(ctx.perfObjects["Memory"], &dst); err != nil {
return nil, err
}
@@ -337,7 +326,7 @@ func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metri
ch <- prometheus.MustNewConstMetric(
c.CacheFaultsTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].CacheFaultsPersec,
)
@@ -355,7 +344,7 @@ func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metri
ch <- prometheus.MustNewConstMetric(
c.DemandZeroFaultsTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].DemandZeroFaultsPersec,
)
@@ -379,37 +368,37 @@ func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metri
ch <- prometheus.MustNewConstMetric(
c.PageFaultsTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].PageFaultsPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPageReadsTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].PageReadsPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPagesReadTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].PagesInputPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPagesWrittenTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].PagesOutputPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPageOperationsTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].PagesPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPageWritesTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].PageWritesPersec,
)
@@ -427,7 +416,7 @@ func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metri
ch <- prometheus.MustNewConstMetric(
c.PoolPagedAllocsTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].PoolPagedAllocs,
)
@@ -493,19 +482,19 @@ func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metri
ch <- prometheus.MustNewConstMetric(
c.TransitionFaultsTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].TransitionFaultsPersec,
)
ch <- prometheus.MustNewConstMetric(
c.TransitionPagesRepurposedTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].TransitionPagesRePurposedPersec,
)
ch <- prometheus.MustNewConstMetric(
c.WriteCopiesTotal,
prometheus.CounterValue,
prometheus.GaugeValue,
dst[0].WriteCopiesPersec,
)

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkMemoryCollector(b *testing.B) {
benchmarkCollector(b, "memory", newMemoryCollector)
}

File diff suppressed because it is too large

View File

@@ -1,117 +0,0 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A MSCluster_NetworkCollector is a Prometheus collector for WMI MSCluster_Network metrics
type MSCluster_NetworkCollector struct {
logger log.Logger
Characteristics *prometheus.Desc
Flags *prometheus.Desc
Metric *prometheus.Desc
Role *prometheus.Desc
State *prometheus.Desc
}
func newMSCluster_NetworkCollector(logger log.Logger) (Collector, error) {
const subsystem = "mscluster_network"
return &MSCluster_NetworkCollector{
logger: log.With(logger, "collector", subsystem),
Characteristics: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
"Provides the characteristics of the network.",
[]string{"name"},
nil,
),
Flags: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "flags"),
"Provides access to the flags set for the network.",
[]string{"name"},
nil,
),
Metric: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "metric"),
"The metric of a cluster network (networks with lower values are used first). If this value is set, then the AutoMetric property is set to false.",
[]string{"name"},
nil,
),
Role: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "role"),
"Provides access to the network's Role property. The Role property describes the role of the network in the cluster. 0: None; 1: Cluster; 2: Client; 3: Both ",
[]string{"name"},
nil,
),
State: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "state"),
"Provides the current state of the network. -1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up",
[]string{"name"},
nil,
),
}, nil
}
// MSCluster_Network docs:
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-network
type MSCluster_Network struct {
Name string
Characteristics uint
Flags uint
Metric uint
Role uint
State uint
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *MSCluster_NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Network
q := queryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.Characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Metric,
prometheus.GaugeValue,
float64(v.Metric),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Role,
prometheus.GaugeValue,
float64(v.Role),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.State,
prometheus.GaugeValue,
float64(v.State),
v.Name,
)
}
return nil
}

View File

@@ -1,252 +0,0 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A MSCluster_NodeCollector is a Prometheus collector for WMI MSCluster_Node metrics
type MSCluster_NodeCollector struct {
logger log.Logger
BuildNumber *prometheus.Desc
Characteristics *prometheus.Desc
DetectedCloudPlatform *prometheus.Desc
DynamicWeight *prometheus.Desc
Flags *prometheus.Desc
MajorVersion *prometheus.Desc
MinorVersion *prometheus.Desc
NeedsPreventQuorum *prometheus.Desc
NodeDrainStatus *prometheus.Desc
NodeHighestVersion *prometheus.Desc
NodeLowestVersion *prometheus.Desc
NodeWeight *prometheus.Desc
State *prometheus.Desc
StatusInformation *prometheus.Desc
}
func newMSCluster_NodeCollector(logger log.Logger) (Collector, error) {
const subsystem = "mscluster_node"
return &MSCluster_NodeCollector{
logger: log.With(logger, "collector", subsystem),
BuildNumber: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "build_number"),
"Provides access to the node's BuildNumber property.",
[]string{"name"},
nil,
),
Characteristics: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
"Provides access to the characteristics set for the node.",
[]string{"name"},
nil,
),
DetectedCloudPlatform: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "detected_cloud_platform"),
"(DetectedCloudPlatform)",
[]string{"name"},
nil,
),
DynamicWeight: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "dynamic_weight"),
"The dynamic vote weight of the node adjusted by dynamic quorum feature.",
[]string{"name"},
nil,
),
Flags: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "flags"),
"Provides access to the flags set for the node.",
[]string{"name"},
nil,
),
MajorVersion: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "major_version"),
"Provides access to the node's MajorVersion property, which specifies the major portion of the Windows version installed.",
[]string{"name"},
nil,
),
MinorVersion: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "minor_version"),
"Provides access to the node's MinorVersion property, which specifies the minor portion of the Windows version installed.",
[]string{"name"},
nil,
),
NeedsPreventQuorum: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "needs_prevent_quorum"),
"Whether the cluster service on that node should be started with prevent quorum flag.",
[]string{"name"},
nil,
),
NodeDrainStatus: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "node_drain_status"),
"The current node drain status of a node. 0: Not Initiated; 1: In Progress; 2: Completed; 3: Failed",
[]string{"name"},
nil,
),
NodeHighestVersion: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "node_highest_version"),
"Provides access to the node's NodeHighestVersion property, which specifies the highest possible version of the cluster service with which the node can join or communicate.",
[]string{"name"},
nil,
),
NodeLowestVersion: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "node_lowest_version"),
"Provides access to the node's NodeLowestVersion property, which specifies the lowest possible version of the cluster service with which the node can join or communicate.",
[]string{"name"},
nil,
),
NodeWeight: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "node_weight"),
"The vote weight of the node.",
[]string{"name"},
nil,
),
State: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "state"),
"Returns the current state of a node. -1: Unknown; 0: Up; 1: Down; 2: Paused; 3: Joining",
[]string{"name"},
nil,
),
StatusInformation: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "status_information"),
"The isolation or quarantine status of the node.",
[]string{"name"},
nil,
),
}, nil
}
// MSCluster_Node docs:
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-node
type MSCluster_Node struct {
Name string
BuildNumber uint
Characteristics uint
DetectedCloudPlatform uint
DynamicWeight uint
Flags uint
MajorVersion uint
MinorVersion uint
NeedsPreventQuorum uint
NodeDrainStatus uint
NodeHighestVersion uint
NodeLowestVersion uint
NodeWeight uint
State uint
StatusInformation uint
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *MSCluster_NodeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Node
q := queryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.BuildNumber,
prometheus.GaugeValue,
float64(v.BuildNumber),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DetectedCloudPlatform,
prometheus.GaugeValue,
float64(v.DetectedCloudPlatform),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DynamicWeight,
prometheus.GaugeValue,
float64(v.DynamicWeight),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MajorVersion,
prometheus.GaugeValue,
float64(v.MajorVersion),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MinorVersion,
prometheus.GaugeValue,
float64(v.MinorVersion),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NeedsPreventQuorum,
prometheus.GaugeValue,
float64(v.NeedsPreventQuorum),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NodeDrainStatus,
prometheus.GaugeValue,
float64(v.NodeDrainStatus),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NodeHighestVersion,
prometheus.GaugeValue,
float64(v.NodeHighestVersion),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NodeLowestVersion,
prometheus.GaugeValue,
float64(v.NodeLowestVersion),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NodeWeight,
prometheus.GaugeValue,
float64(v.NodeWeight),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.State,
prometheus.GaugeValue,
float64(v.State),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.StatusInformation,
prometheus.GaugeValue,
float64(v.StatusInformation),
v.Name,
)
}
return nil
}
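For readers unfamiliar with the WMI pattern in Collect above: queryAll is expected to derive a plain SELECT query from the destination struct's type name (an assumption based on the package's naming convention), and the MSCluster classes live in the root/MSCluster namespace rather than the default root/cimv2, hence QueryNamespace. A minimal standalone sketch of the same pattern:

// Minimal standalone sketch of the WMI query pattern used above. Assumes
// the generated query is a plain "SELECT * FROM <struct name>".
package main

import (
	"fmt"

	"github.com/yusufpapurcu/wmi"
)

type MSCluster_Node struct {
	Name  string
	State uint
}

func main() {
	var dst []MSCluster_Node
	q := "SELECT * FROM MSCluster_Node"
	// MSCluster classes live in root/MSCluster, not the default root/cimv2.
	if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
		fmt.Println("query failed:", err)
		return
	}
	for _, n := range dst {
		fmt.Printf("node %s state=%d\n", n.Name, n.State)
	}
}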

View File

@@ -1,284 +0,0 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A MSCluster_ResourceCollector is a Prometheus collector for WMI MSCluster_Resource metrics
type MSCluster_ResourceCollector struct {
logger log.Logger
Characteristics *prometheus.Desc
DeadlockTimeout *prometheus.Desc
EmbeddedFailureAction *prometheus.Desc
Flags *prometheus.Desc
IsAlivePollInterval *prometheus.Desc
LooksAlivePollInterval *prometheus.Desc
MonitorProcessId *prometheus.Desc
PendingTimeout *prometheus.Desc
ResourceClass *prometheus.Desc
RestartAction *prometheus.Desc
RestartDelay *prometheus.Desc
RestartPeriod *prometheus.Desc
RestartThreshold *prometheus.Desc
RetryPeriodOnFailure *prometheus.Desc
State *prometheus.Desc
Subclass *prometheus.Desc
}
func newMSCluster_ResourceCollector(logger log.Logger) (Collector, error) {
const subsystem = "mscluster_resource"
return &MSCluster_ResourceCollector{
logger: log.With(logger, "collector", subsystem),
Characteristics: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
"Provides the characteristics of the object.",
[]string{"type", "owner_group", "name"},
nil,
),
DeadlockTimeout: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "deadlock_timeout"),
"Indicates the length of time to wait, in milliseconds, before declaring a deadlock in any call into a resource.",
[]string{"type", "owner_group", "name"},
nil,
),
EmbeddedFailureAction: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "embedded_failure_action"),
"The time, in milliseconds, that a resource should remain in a failed state before the Cluster service attempts to restart it.",
[]string{"type", "owner_group", "name"},
nil,
),
Flags: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "flags"),
"Provides access to the flags set for the object.",
[]string{"type", "owner_group", "name"},
nil,
),
IsAlivePollInterval: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "is_alive_poll_interval"),
"Provides access to the resource's IsAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it is operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the IsAlivePollInterval property for the resource type associated with the resource.",
[]string{"type", "owner_group", "name"},
nil,
),
LooksAlivePollInterval: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "looks_alive_poll_interval"),
"Provides access to the resource's LooksAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it appears operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the LooksAlivePollInterval property for the resource type associated with the resource.",
[]string{"type", "owner_group", "name"},
nil,
),
MonitorProcessId: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "monitor_process_id"),
"Provides the process ID of the resource host service that is currently hosting the resource.",
[]string{"type", "owner_group", "name"},
nil,
),
PendingTimeout: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pending_timeout"),
"Provides access to the resource's PendingTimeout property. If a resource cannot be brought online or taken offline in the number of milliseconds specified by the PendingTimeout property, the resource is forcibly terminated.",
[]string{"type", "owner_group", "name"},
nil,
),
ResourceClass: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "resource_class"),
"Gets or sets the resource class of a resource. 0: Unknown; 1: Storage; 2: Network; 32768: Unknown ",
[]string{"type", "owner_group", "name"},
nil,
),
RestartAction: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "restart_action"),
"Provides access to the resource's RestartAction property, which is the action to be taken by the Cluster Service if the resource fails.",
[]string{"type", "owner_group", "name"},
nil,
),
RestartDelay: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "restart_delay"),
"Indicates the time delay before a failed resource is restarted.",
[]string{"type", "owner_group", "name"},
nil,
),
RestartPeriod: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "restart_period"),
"Provides access to the resource's RestartPeriod property, which is interval of time, in milliseconds, during which a specified number of restart attempts can be made on a nonresponsive resource.",
[]string{"type", "owner_group", "name"},
nil,
),
RestartThreshold: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "restart_threshold"),
"Provides access to the resource's RestartThreshold property which is the maximum number of restart attempts that can be made on a resource within an interval defined by the RestartPeriod property before the Cluster Service initiates the action specified by the RestartAction property.",
[]string{"type", "owner_group", "name"},
nil,
),
RetryPeriodOnFailure: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "retry_period_on_failure"),
"Provides access to the resource's RetryPeriodOnFailure property, which is the interval of time (in milliseconds) that a resource should remain in a failed state before the Cluster service attempts to restart it.",
[]string{"type", "owner_group", "name"},
nil,
),
State: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "state"),
"The current state of the resource. -1: Unknown; 0: Inherited; 1: Initializing; 2: Online; 3: Offline; 4: Failed; 128: Pending; 129: Online Pending; 130: Offline Pending ",
[]string{"type", "owner_group", "name"},
nil,
),
Subclass: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "subclass"),
"Provides the list of references to nodes that can be the owner of this resource.",
[]string{"type", "owner_group", "name"},
nil,
),
}, nil
}
// MSCluster_Resource docs:
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resource
type MSCluster_Resource struct {
Name string
Type string
OwnerGroup string
Characteristics uint
DeadlockTimeout uint
EmbeddedFailureAction uint
Flags uint
IsAlivePollInterval uint
LooksAlivePollInterval uint
MonitorProcessId uint
PendingTimeout uint
ResourceClass uint
RestartAction uint
RestartDelay uint
RestartPeriod uint
RestartThreshold uint
RetryPeriodOnFailure uint
State uint
Subclass uint
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *MSCluster_ResourceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Resource
q := queryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.Characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DeadlockTimeout,
prometheus.GaugeValue,
float64(v.DeadlockTimeout),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.EmbeddedFailureAction,
prometheus.GaugeValue,
float64(v.EmbeddedFailureAction),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.IsAlivePollInterval,
prometheus.GaugeValue,
float64(v.IsAlivePollInterval),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.LooksAlivePollInterval,
prometheus.GaugeValue,
float64(v.LooksAlivePollInterval),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MonitorProcessId,
prometheus.GaugeValue,
float64(v.MonitorProcessId),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PendingTimeout,
prometheus.GaugeValue,
float64(v.PendingTimeout),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ResourceClass,
prometheus.GaugeValue,
float64(v.ResourceClass),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RestartAction,
prometheus.GaugeValue,
float64(v.RestartAction),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RestartDelay,
prometheus.GaugeValue,
float64(v.RestartDelay),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RestartPeriod,
prometheus.GaugeValue,
float64(v.RestartPeriod),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RestartThreshold,
prometheus.GaugeValue,
float64(v.RestartThreshold),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RetryPeriodOnFailure,
prometheus.GaugeValue,
float64(v.RetryPeriodOnFailure),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.State,
prometheus.GaugeValue,
float64(v.State),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Subclass,
prometheus.GaugeValue,
float64(v.Subclass),
v.Type, v.OwnerGroup, v.Name,
)
}
return nil
}
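One detail worth noting in the resource collector above: the variadic label values passed to MustNewConstMetric must appear in the same order as the variable labels declared in the corresponding NewDesc call (type, owner_group, name). A minimal sketch of that contract:

// Minimal sketch: label values passed to MustNewConstMetric must follow
// the order of the variable labels declared in the Desc.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"windows_mscluster_resource_state", // example name
		"Example description.",
		[]string{"type", "owner_group", "name"}, // declared order
		nil,
	)
	// Values must be supplied as type, owner_group, name - the same order.
	m := prometheus.MustNewConstMetric(
		desc, prometheus.GaugeValue, 2, "Network Name", "Cluster Group", "Cluster Name",
	)
	fmt.Println(m.Desc())
}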

View File

@@ -1,240 +0,0 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A MSCluster_ResourceGroupCollector is a Prometheus collector for WMI MSCluster_ResourceGroup metrics
type MSCluster_ResourceGroupCollector struct {
logger log.Logger
AutoFailbackType *prometheus.Desc
Characteristics *prometheus.Desc
ColdStartSetting *prometheus.Desc
DefaultOwner *prometheus.Desc
FailbackWindowEnd *prometheus.Desc
FailbackWindowStart *prometheus.Desc
FailoverPeriod *prometheus.Desc
FailoverThreshold *prometheus.Desc
FaultDomain *prometheus.Desc
Flags *prometheus.Desc
GroupType *prometheus.Desc
PlacementOptions *prometheus.Desc
Priority *prometheus.Desc
ResiliencyPeriod *prometheus.Desc
State *prometheus.Desc
}
func newMSCluster_ResourceGroupCollector(logger log.Logger) (Collector, error) {
const subsystem = "mscluster_resourcegroup"
return &MSCluster_ResourceGroupCollector{
logger: log.With(logger, "collector", subsystem),
AutoFailbackType: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "auto_failback_type"),
"Provides access to the group's AutoFailbackType property.",
[]string{"name"},
nil,
),
Characteristics: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
"Provides the characteristics of the group.",
[]string{"name"},
nil,
),
ColdStartSetting: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cold_start_setting"),
"Indicates whether a group can start after a cluster cold start.",
[]string{"name"},
nil,
),
DefaultOwner: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "default_owner"),
"Number of the last node the resource group was activated on or explicitly moved to.",
[]string{"name"},
nil,
),
FailbackWindowEnd: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failback_window_end"),
"The FailbackWindowEnd property provides the latest time that the group can be moved back to the node identified as its preferred node.",
[]string{"name"},
nil,
),
FailbackWindowStart: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failback_window_start"),
"The FailbackWindowStart property provides the earliest time (that is, local time as kept by the cluster) that the group can be moved back to the node identified as its preferred node.",
[]string{"name"},
nil,
),
FailoverPeriod: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_period"),
"The FailoverPeriod property specifies a number of hours during which a maximum number of failover attempts, specified by the FailoverThreshold property, can occur.",
[]string{"name"},
nil,
),
FailoverThreshold: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_threshold"),
"The FailoverThreshold property specifies the maximum number of failover attempts.",
[]string{"name"},
nil,
),
Flags: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "flags"),
"Provides access to the flags set for the group. ",
[]string{"name"},
nil,
),
GroupType: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "group_type"),
"The Type of the resource group.",
[]string{"name"},
nil,
),
Priority: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "priority"),
"Priority value of the resource group",
[]string{"name"},
nil,
),
ResiliencyPeriod: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "resiliency_period"),
"The resiliency period for this group, in seconds.",
[]string{"name"},
nil,
),
State: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "state"),
"The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending",
[]string{"name"},
nil,
),
}, nil
}
// MSCluster_ResourceGroup docs:
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resourcegroup
type MSCluster_ResourceGroup struct {
Name string
AutoFailbackType uint
Characteristics uint
ColdStartSetting uint
DefaultOwner uint
FailbackWindowEnd int
FailbackWindowStart int
FailoverPeriod uint
FailoverThreshold uint
Flags uint
GroupType uint
Priority uint
ResiliencyPeriod uint
State uint
}
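Note that FailbackWindowEnd and FailbackWindowStart are the only signed fields in the struct above, presumably because a value of -1 denotes that no failback window is configured (an assumption based on the cluster documentation; all other numeric properties are unsigned). A hedged sketch of how such a sentinel might be interpreted:

// Hedged sketch, assuming -1 means "no failback window configured" for the
// signed FailbackWindowStart/End properties above.
package main

import "fmt"

func failbackWindow(start, end int) string {
	if start == -1 || end == -1 {
		return "no failback window configured"
	}
	return fmt.Sprintf("failback allowed between %02d:00 and %02d:00", start, end)
}

func main() {
	fmt.Println(failbackWindow(-1, -1))
	fmt.Println(failbackWindow(1, 5))
}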
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *MSCluster_ResourceGroupCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_ResourceGroup
q := queryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.AutoFailbackType,
prometheus.GaugeValue,
float64(v.AutoFailbackType),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ColdStartSetting,
prometheus.GaugeValue,
float64(v.ColdStartSetting),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DefaultOwner,
prometheus.GaugeValue,
float64(v.DefaultOwner),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailbackWindowEnd,
prometheus.GaugeValue,
float64(v.FailbackWindowEnd),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailbackWindowStart,
prometheus.GaugeValue,
float64(v.FailbackWindowStart),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverPeriod,
prometheus.GaugeValue,
float64(v.FailoverPeriod),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverThreshold,
prometheus.GaugeValue,
float64(v.FailoverThreshold),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.GroupType,
prometheus.GaugeValue,
float64(v.GroupType),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Priority,
prometheus.GaugeValue,
float64(v.Priority),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ResiliencyPeriod,
prometheus.GaugeValue,
float64(v.ResiliencyPeriod),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.State,
prometheus.GaugeValue,
float64(v.State),
v.Name,
)
}
return nil
}

View File

@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package collector
@@ -6,25 +5,22 @@ package collector
import (
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
FlagMsmqWhereClause = "collector.msmq.msmq-where"
)
func init() {
registerCollector("msmq", NewMSMQCollector)
}
var (
msmqWhereClause *string
msmqWhereClause = kingpin.Flag("collector.msmq.msmq-where", "WQL 'where' clause to use in WMI metrics query. Limits the response to the msmqs you specify and reduces the size of the response.").String()
)
// A Win32_PerfRawData_MSMQ_MSMQQueueCollector is a Prometheus collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics
type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
logger log.Logger
BytesinJournalQueue *prometheus.Desc
BytesinQueue *prometheus.Desc
MessagesinJournalQueue *prometheus.Desc
@@ -33,23 +29,15 @@ type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
queryWhereClause string
}
// newMSMQCollectorFlags ...
func newMSMQCollectorFlags(app *kingpin.Application) {
msmqWhereClause = app.Flag(FlagMsmqWhereClause, "WQL 'where' clause to use in WMI metrics query. Limits the response to the msmqs you specify and reduces the size of the response.").String()
}
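For illustration, assuming queryAllWhere simply appends the user-supplied clause to the generated SELECT (a hypothetical helper mirroring that convention is shown below), a where-clause like Name LIKE '%myqueue%' narrows the query as follows:

// Hypothetical sketch of how the msmq-where flag narrows the WMI query,
// assuming the clause is appended to a generated SELECT statement.
package main

import "fmt"

func buildQuery(class, where string) string {
	q := "SELECT * FROM " + class
	if where != "" {
		q += " WHERE " + where
	}
	return q
}

func main() {
	fmt.Println(buildQuery("Win32_PerfRawData_MSMQ_MSMQQueue", "Name LIKE '%myqueue%'"))
	// SELECT * FROM Win32_PerfRawData_MSMQ_MSMQQueue WHERE Name LIKE '%myqueue%'
}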
// NewWin32_PerfRawData_MSMQ_MSMQQueueCollector ...
func newMSMQCollector(logger log.Logger) (Collector, error) {
func NewMSMQCollector() (Collector, error) {
const subsystem = "msmq"
logger = log.With(logger, "collector", subsystem)
if *msmqWhereClause == "" {
_ = level.Warn(logger).Log("msg", "No where-clause specified for msmq collector. This will generate a very large number of metrics!")
log.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
}
return &Win32_PerfRawData_MSMQ_MSMQQueueCollector{
logger: logger,
BytesinJournalQueue: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "bytes_in_journal_queue"),
"Size of queue journal in bytes",
@@ -82,7 +70,7 @@ func newMSMQCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting msmq metrics", "desc", desc, "err", err)
log.Error("failed collecting msmq metrics:", desc, err)
return err
}
return nil
@@ -99,33 +87,35 @@ type Win32_PerfRawData_MSMQ_MSMQQueue struct {
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_MSMQ_MSMQQueue
q := queryAllWhere(&dst, c.queryWhereClause, c.logger)
q := queryAllWhere(&dst, c.queryWhereClause)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
for _, msmq := range dst {
if msmq.Name == "Computer Queues" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.BytesinJournalQueue,
prometheus.GaugeValue,
float64(msmq.BytesinJournalQueue),
strings.ToLower(msmq.Name),
)
ch <- prometheus.MustNewConstMetric(
c.BytesinQueue,
prometheus.GaugeValue,
float64(msmq.BytesinQueue),
strings.ToLower(msmq.Name),
)
ch <- prometheus.MustNewConstMetric(
c.MessagesinJournalQueue,
prometheus.GaugeValue,
float64(msmq.MessagesinJournalQueue),
strings.ToLower(msmq.Name),
)
ch <- prometheus.MustNewConstMetric(
c.MessagesinQueue,
prometheus.GaugeValue,

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkMsmqCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newMSMQCollector)
}

File diff suppressed because it is too large

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkMSSQLCollector(b *testing.B) {
benchmarkCollector(b, "mssql", newMSSQLCollector)
}

View File

@@ -1,48 +1,37 @@
//go:build windows
// +build windows
package collector
import (
"errors"
"fmt"
"regexp"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
FlagNicOldExclude = "collector.net.nic-blacklist"
FlagNicOldInclude = "collector.net.nic-whitelist"
FlagNicExclude = "collector.net.nic-exclude"
FlagNicInclude = "collector.net.nic-include"
)
func init() {
registerCollector("net", NewNetworkCollector, "Network Interface")
}
var (
nicOldInclude *string
nicOldExclude *string
nicInclude *string
nicExclude *string
nicIncludeSet bool
nicExcludeSet bool
nicWhitelist = kingpin.Flag(
"collector.net.nic-whitelist",
"Regexp of NIC:s to whitelist. NIC name must both match whitelist and not match blacklist to be included.",
).Default(".+").String()
nicBlacklist = kingpin.Flag(
"collector.net.nic-blacklist",
"Regexp of NIC:s to blacklist. NIC name must both match whitelist and not match blacklist to be included.",
).Default("").String()
nicNameToUnderscore = regexp.MustCompile("[^a-zA-Z0-9]")
)
// A NetworkCollector is a Prometheus collector for Perflib Network Interface metrics
type NetworkCollector struct {
logger log.Logger
BytesReceivedTotal *prometheus.Desc
BytesSentTotal *prometheus.Desc
BytesTotal *prometheus.Desc
OutputQueueLength *prometheus.Desc
PacketsOutboundDiscarded *prometheus.Desc
PacketsOutboundErrors *prometheus.Desc
PacketsTotal *prometheus.Desc
@@ -53,63 +42,15 @@ type NetworkCollector struct {
PacketsSentTotal *prometheus.Desc
CurrentBandwidth *prometheus.Desc
nicIncludePattern *regexp.Regexp
nicExcludePattern *regexp.Regexp
nicWhitelistPattern *regexp.Regexp
nicBlacklistPattern *regexp.Regexp
}
// newNetworkCollectorFlags ...
func newNetworkCollectorFlags(app *kingpin.Application) {
nicInclude = app.Flag(
FlagNicInclude,
"Regexp of NIC:s to include. NIC name must both match include and not match exclude to be included.",
).Default(".+").PreAction(func(c *kingpin.ParseContext) error {
nicIncludeSet = true
return nil
}).String()
nicExclude = app.Flag(
FlagNicExclude,
"Regexp of NIC:s to exclude. NIC name must both match include and not match exclude to be included.",
).Default("").PreAction(func(c *kingpin.ParseContext) error {
nicExcludeSet = true
return nil
}).String()
nicOldInclude = app.Flag(
FlagNicOldInclude,
"DEPRECATED: Use --collector.net.nic-include",
).Hidden().String()
nicOldExclude = app.Flag(
FlagNicOldExclude,
"DEPRECATED: Use --collector.net.nic-exclude",
).Hidden().String()
}
// newNetworkCollector ...
func newNetworkCollector(logger log.Logger) (Collector, error) {
// NewNetworkCollector ...
func NewNetworkCollector() (Collector, error) {
const subsystem = "net"
logger = log.With(logger, "collector", subsystem)
if *nicOldExclude != "" {
if !nicExcludeSet {
_ = level.Warn(logger).Log("msg", "--collector.net.nic-blacklist is DEPRECATED and will be removed in a future release, use --collector.net.nic-exclude")
*nicExclude = *nicOldExclude
} else {
return nil, errors.New("--collector.net.nic-blacklist and --collector.net.nic-exclude are mutually exclusive")
}
}
if *nicOldInclude != "" {
if !nicIncludeSet {
_ = level.Warn(logger).Log("msg", "--collector.net.nic-whitelist is DEPRECATED and will be removed in a future release, use --collector.net.nic-include")
*nicInclude = *nicOldInclude
} else {
return nil, errors.New("--collector.net.nic-whitelist and --collector.net.nic-include are mutually exclusive")
}
}
return &NetworkCollector{
logger: logger,
BytesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "bytes_received_total"),
"(Network.BytesReceivedPerSec)",
@@ -128,32 +69,26 @@ func newNetworkCollector(logger log.Logger) (Collector, error) {
[]string{"nic"},
nil,
),
OutputQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "output_queue_length_packets"),
"(Network.OutputQueueLength)",
[]string{"nic"},
nil,
),
PacketsOutboundDiscarded: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_discarded_total"),
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_discarded"),
"(Network.PacketsOutboundDiscarded)",
[]string{"nic"},
nil,
),
PacketsOutboundErrors: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_errors_total"),
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_errors"),
"(Network.PacketsOutboundErrors)",
[]string{"nic"},
nil,
),
PacketsReceivedDiscarded: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_received_discarded_total"),
prometheus.BuildFQName(Namespace, subsystem, "packets_received_discarded"),
"(Network.PacketsReceivedDiscarded)",
[]string{"nic"},
nil,
),
PacketsReceivedErrors: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_received_errors_total"),
prometheus.BuildFQName(Namespace, subsystem, "packets_received_errors"),
"(Network.PacketsReceivedErrors)",
[]string{"nic"},
nil,
@@ -165,7 +100,7 @@ func newNetworkCollector(logger log.Logger) (Collector, error) {
nil,
),
PacketsReceivedUnknown: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_received_unknown_total"),
prometheus.BuildFQName(Namespace, subsystem, "packets_received_unknown"),
"(Network.PacketsReceivedUnknown)",
[]string{"nic"},
nil,
@@ -183,14 +118,14 @@ func newNetworkCollector(logger log.Logger) (Collector, error) {
nil,
),
CurrentBandwidth: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "current_bandwidth_bytes"),
prometheus.BuildFQName(Namespace, subsystem, "current_bandwidth"),
"(Network.CurrentBandwidth)",
[]string{"nic"},
nil,
),
nicIncludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicInclude)),
nicExcludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicExclude)),
nicWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicWhitelist)),
nicBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicBlacklist)),
}, nil
}
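The fmt.Sprintf("^(?:%s)$", ...) wrapping above anchors the user-supplied pattern so it must match the whole interface name rather than a substring, and a NIC is kept only if it matches the include pattern and does not match the exclude pattern. A minimal sketch of that filter logic with illustrative (non-default) patterns:

// Minimal sketch of the NIC filtering logic shown above.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	include := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", "Ethernet.*"))
	exclude := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", ".*Virtual.*"))

	for _, nic := range []string{"Ethernet0", "Ethernet Virtual Switch", "Loopback"} {
		keep := include.MatchString(nic) && !exclude.MatchString(nic)
		fmt.Printf("%-24s keep=%v\n", nic, keep)
	}
}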
@@ -198,7 +133,7 @@ func newNetworkCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting net metrics", "desc", desc, "err", err)
log.Error("failed collecting net metrics:", desc, err)
return err
}
return nil
@@ -217,7 +152,6 @@ type networkInterface struct {
BytesSentPerSec float64 `perflib:"Bytes Sent/sec"`
BytesTotalPerSec float64 `perflib:"Bytes Total/sec"`
Name string
OutputQueueLength float64 `perflib:"Output Queue Length"`
PacketsOutboundDiscarded float64 `perflib:"Packets Outbound Discarded"`
PacketsOutboundErrors float64 `perflib:"Packets Outbound Errors"`
PacketsPerSec float64 `perflib:"Packets/sec"`
@@ -232,13 +166,13 @@ type networkInterface struct {
func (c *NetworkCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []networkInterface
if err := unmarshalObject(ctx.perfObjects["Network Interface"], &dst, c.logger); err != nil {
if err := unmarshalObject(ctx.perfObjects["Network Interface"], &dst); err != nil {
return nil, err
}
for _, nic := range dst {
if c.nicExcludePattern.MatchString(nic.Name) ||
!c.nicIncludePattern.MatchString(nic.Name) {
if c.nicBlacklistPattern.MatchString(nic.Name) ||
!c.nicWhitelistPattern.MatchString(nic.Name) {
continue
}
@@ -266,12 +200,6 @@ func (c *NetworkCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
nic.BytesTotalPerSec,
name,
)
ch <- prometheus.MustNewConstMetric(
c.OutputQueueLength,
prometheus.GaugeValue,
nic.OutputQueueLength,
name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsOutboundDiscarded,
prometheus.CounterValue,
@@ -323,7 +251,7 @@ func (c *NetworkCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.CurrentBandwidth,
prometheus.GaugeValue,
nic.CurrentBandwidth/8,
nic.CurrentBandwidth,
name,
)
}
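The division by 8 in the newer code reflects that the underlying Current Bandwidth perflib counter reports bits per second, while the renamed current_bandwidth_bytes metric is meant to expose bytes per second. A quick worked example:

// Worked example: a 1 Gbit/s NIC reported by perflib becomes
// 125,000,000 bytes per second after the /8 conversion.
package main

import "fmt"

func main() {
	currentBandwidthBits := 1_000_000_000.0 // 1 Gbit/s as reported by perflib
	fmt.Printf("%.0f bytes/s\n", currentBandwidthBits/8) // 125000000 bytes/s
}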

View File

@@ -1,11 +1,8 @@
//go:build windows
// +build windows
package collector
import (
"testing"
)
import "testing"
func TestNetworkToInstanceName(t *testing.T) {
data := map[string]string{
@@ -18,10 +15,3 @@ func TestNetworkToInstanceName(t *testing.T) {
}
}
}
func BenchmarkNetCollector(b *testing.B) {
// Include is not set in testing context (kingpin flags not parsed), causing the collector to skip all interfaces.
localNicInclude := ".+"
nicInclude = &localNicInclude
benchmarkCollector(b, "net", newNetworkCollector)
}
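The TestNetworkToInstanceName test above exercises the name sanitization driven by nicNameToUnderscore; a minimal sketch of the substitution, assuming the instance name is rewritten by replacing every non-alphanumeric character with an underscore (which is what the regexp suggests):

// Minimal sketch of NIC name sanitization: every character outside
// [a-zA-Z0-9] is replaced with an underscore.
package main

import (
	"fmt"
	"regexp"
)

var nicNameToUnderscore = regexp.MustCompile("[^a-zA-Z0-9]")

func main() {
	name := "Intel[R] Ethernet #2"
	fmt.Println(nicNameToUnderscore.ReplaceAllString(name, "_"))
	// Intel_R__Ethernet__2
}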

View File

@@ -1,31 +1,29 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("netframework_clrexceptions", NewNETFramework_NETCLRExceptionsCollector)
}
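The init-time registration above follows a simple name-to-constructor registry pattern. A minimal standalone sketch of the idea, assuming registerCollector stores the constructor in a package-level map (the names and internals here are hypothetical, not the exporter's actual implementation):

// Minimal sketch of an init-time collector registry; hypothetical internals.
package main

import "fmt"

type Collector interface{}

type builder func() (Collector, error)

var builders = map[string]builder{}

func registerCollector(name string, fn builder) {
	builders[name] = fn
}

func main() {
	registerCollector("netframework_clrexceptions", func() (Collector, error) { return nil, nil })
	for name := range builders {
		fmt.Println("registered:", name)
	}
}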
// A NETFramework_NETCLRExceptionsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics
type NETFramework_NETCLRExceptionsCollector struct {
logger log.Logger
NumberofExcepsThrown *prometheus.Desc
NumberofFilters *prometheus.Desc
NumberofFinallys *prometheus.Desc
ThrowToCatchDepth *prometheus.Desc
}
// newNETFramework_NETCLRExceptionsCollector ...
func newNETFramework_NETCLRExceptionsCollector(logger log.Logger) (Collector, error) {
// NewNETFramework_NETCLRExceptionsCollector ...
func NewNETFramework_NETCLRExceptionsCollector() (Collector, error) {
const subsystem = "netframework_clrexceptions"
return &NETFramework_NETCLRExceptionsCollector{
logger: log.With(logger, "collector", subsystem),
NumberofExcepsThrown: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "exceptions_thrown_total"),
"Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.",
@@ -57,7 +55,7 @@ func newNETFramework_NETCLRExceptionsCollector(logger log.Logger) (Collector, er
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics", "desc", desc, "err", err)
log.Error("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics:", desc, err)
return err
}
return nil
@@ -75,7 +73,7 @@ type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {
func (c *NETFramework_NETCLRExceptionsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkNetFrameworkNETCLRExceptionsCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newNETFramework_NETCLRExceptionsCollector)
}

View File

@@ -1,29 +1,28 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("netframework_clrinterop", NewNETFramework_NETCLRInteropCollector)
}
// A NETFramework_NETCLRInteropCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics
type NETFramework_NETCLRInteropCollector struct {
logger log.Logger
NumberofCCWs *prometheus.Desc
Numberofmarshalling *prometheus.Desc
NumberofStubs *prometheus.Desc
}
// newNETFramework_NETCLRInteropCollector ...
func newNETFramework_NETCLRInteropCollector(logger log.Logger) (Collector, error) {
// NewNETFramework_NETCLRInteropCollector ...
func NewNETFramework_NETCLRInteropCollector() (Collector, error) {
const subsystem = "netframework_clrinterop"
return &NETFramework_NETCLRInteropCollector{
logger: log.With(logger, "collector", subsystem),
NumberofCCWs: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "com_callable_wrappers_total"),
"Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.",
@@ -49,7 +48,7 @@ func newNETFramework_NETCLRInteropCollector(logger log.Logger) (Collector, error
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrinterop metrics", "desc", desc, "err", err)
log.Error("failed collecting win32_perfrawdata_netframework_netclrinterop metrics:", desc, err)
return err
}
return nil
@@ -67,7 +66,7 @@ type Win32_PerfRawData_NETFramework_NETCLRInterop struct {
func (c *NETFramework_NETCLRInteropCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkNETFrameworkNETCLRInteropCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newNETFramework_NETCLRInteropCollector)
}

View File

@@ -1,30 +1,29 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("netframework_clrjit", NewNETFramework_NETCLRJitCollector)
}
// A NETFramework_NETCLRJitCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics
type NETFramework_NETCLRJitCollector struct {
logger log.Logger
NumberofMethodsJitted *prometheus.Desc
TimeinJit *prometheus.Desc
StandardJitFailures *prometheus.Desc
TotalNumberofILBytesJitted *prometheus.Desc
}
// newNETFramework_NETCLRJitCollector ...
func newNETFramework_NETCLRJitCollector(logger log.Logger) (Collector, error) {
// NewNETFramework_NETCLRJitCollector ...
func NewNETFramework_NETCLRJitCollector() (Collector, error) {
const subsystem = "netframework_clrjit"
return &NETFramework_NETCLRJitCollector{
logger: log.With(logger, "collector", subsystem),
NumberofMethodsJitted: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "jit_methods_total"),
"Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.",
@@ -56,7 +55,7 @@ func newNETFramework_NETCLRJitCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrjit metrics", "desc", desc, "err", err)
log.Error("failed collecting win32_perfrawdata_netframework_netclrjit metrics:", desc, err)
return err
}
return nil
@@ -76,7 +75,7 @@ type Win32_PerfRawData_NETFramework_NETCLRJit struct {
func (c *NETFramework_NETCLRJitCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRJit
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkNETFrameworkNETCLRJitCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newNETFramework_NETCLRJitCollector)
}

View File

@@ -1,19 +1,19 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("netframework_clrloading", NewNETFramework_NETCLRLoadingCollector)
}
// A NETFramework_NETCLRLoadingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics
type NETFramework_NETCLRLoadingCollector struct {
logger log.Logger
BytesinLoaderHeap *prometheus.Desc
Currentappdomains *prometheus.Desc
CurrentAssemblies *prometheus.Desc
@@ -25,11 +25,10 @@ type NETFramework_NETCLRLoadingCollector struct {
TotalNumberofLoadFailures *prometheus.Desc
}
// newNETFramework_NETCLRLoadingCollector ...
func newNETFramework_NETCLRLoadingCollector(logger log.Logger) (Collector, error) {
// NewNETFramework_NETCLRLoadingCollector ...
func NewNETFramework_NETCLRLoadingCollector() (Collector, error) {
const subsystem = "netframework_clrloading"
return &NETFramework_NETCLRLoadingCollector{
logger: log.With(logger, "collector", subsystem),
BytesinLoaderHeap: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "loader_heap_size_bytes"),
"Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.",
@@ -91,7 +90,7 @@ func newNETFramework_NETCLRLoadingCollector(logger log.Logger) (Collector, error
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrloading metrics", "desc", desc, "err", err)
log.Error("failed collecting win32_perfrawdata_netframework_netclrloading metrics:", desc, err)
return err
}
return nil
@@ -120,7 +119,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLoading struct {
func (c *NETFramework_NETCLRLoadingCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRLoading
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkNETFrameworkNETCLRLoadingCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newNETFramework_NETCLRLoadingCollector)
}

View File

@@ -1,19 +1,19 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("netframework_clrlocksandthreads", NewNETFramework_NETCLRLocksAndThreadsCollector)
}
// A NETFramework_NETCLRLocksAndThreadsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics
type NETFramework_NETCLRLocksAndThreadsCollector struct {
logger log.Logger
CurrentQueueLength *prometheus.Desc
NumberofcurrentlogicalThreads *prometheus.Desc
NumberofcurrentphysicalThreads *prometheus.Desc
@@ -23,11 +23,10 @@ type NETFramework_NETCLRLocksAndThreadsCollector struct {
TotalNumberofContentions *prometheus.Desc
}
// newNETFramework_NETCLRLocksAndThreadsCollector ...
func newNETFramework_NETCLRLocksAndThreadsCollector(logger log.Logger) (Collector, error) {
// NewNETFramework_NETCLRLocksAndThreadsCollector ...
func NewNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {
const subsystem = "netframework_clrlocksandthreads"
return &NETFramework_NETCLRLocksAndThreadsCollector{
logger: log.With(logger, "collector", subsystem),
CurrentQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "current_queue_length"),
"Displays the total number of threads that are currently waiting to acquire a managed lock in the application.",
@@ -77,7 +76,7 @@ func newNETFramework_NETCLRLocksAndThreadsCollector(logger log.Logger) (Collecto
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics", "desc", desc, "err", err)
log.Error("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics:", desc, err)
return err
}
return nil
@@ -100,7 +99,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads struct {
func (c *NETFramework_NETCLRLocksAndThreadsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkNETFrameworkNETCLRLocksAndThreadsCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newNETFramework_NETCLRLocksAndThreadsCollector)
}

View File

@@ -1,19 +1,19 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("netframework_clrmemory", NewNETFramework_NETCLRMemoryCollector)
}
// A NETFramework_NETCLRMemoryCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics
type NETFramework_NETCLRMemoryCollector struct {
logger log.Logger
AllocatedBytes *prometheus.Desc
FinalizationSurvivors *prometheus.Desc
HeapSize *prometheus.Desc
@@ -31,11 +31,10 @@ type NETFramework_NETCLRMemoryCollector struct {
PromotedMemoryfromGen1 *prometheus.Desc
}
// newNETFramework_NETCLRMemoryCollector ...
func newNETFramework_NETCLRMemoryCollector(logger log.Logger) (Collector, error) {
// NewNETFramework_NETCLRMemoryCollector ...
func NewNETFramework_NETCLRMemoryCollector() (Collector, error) {
const subsystem = "netframework_clrmemory"
return &NETFramework_NETCLRMemoryCollector{
logger: log.With(logger, "collector", subsystem),
AllocatedBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "allocated_bytes_total"),
"Displays the total number of bytes allocated on the garbage collection heap.",
@@ -115,7 +114,7 @@ func newNETFramework_NETCLRMemoryCollector(logger log.Logger) (Collector, error)
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrmemory metrics", "desc", desc, "err", err)
log.Error("failed collecting win32_perfrawdata_netframework_netclrmemory metrics:", desc, err)
return err
}
return nil
@@ -124,31 +123,26 @@ func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan
type Win32_PerfRawData_NETFramework_NETCLRMemory struct {
Name string
AllocatedBytesPersec uint64
FinalizationSurvivors uint64
Frequency_PerfTime uint64
Gen0heapsize uint64
Gen0PromotedBytesPerSec uint64
Gen1heapsize uint64
Gen1PromotedBytesPerSec uint64
Gen2heapsize uint64
LargeObjectHeapsize uint64
NumberBytesinallHeaps uint64
NumberGCHandles uint64
NumberGen0Collections uint64
NumberGen1Collections uint64
NumberGen2Collections uint64
NumberInducedGC uint64
NumberofPinnedObjects uint64
NumberofSinkBlocksinuse uint64
NumberTotalcommittedBytes uint64
NumberTotalreservedBytes uint64
// PercentTimeinGC has countertype=PERF_RAW_FRACTION.
// Formula: (100 * CounterValue) / BaseValue
// Per the docs: https://docs.microsoft.com/en-us/previous-versions/windows/internet-explorer/ie-developer/scripting-articles/ms974615(v=msdn.10)#perf_raw_fraction
PercentTimeinGC uint32
// BaseValue is just a "magic" number used to make the calculation come out right.
PercentTimeinGC_base uint32
AllocatedBytesPersec uint64
FinalizationSurvivors uint64
Frequency_PerfTime uint64
Gen0heapsize uint64
Gen0PromotedBytesPerSec uint64
Gen1heapsize uint64
Gen1PromotedBytesPerSec uint64
Gen2heapsize uint64
LargeObjectHeapsize uint64
NumberBytesinallHeaps uint64
NumberGCHandles uint64
NumberGen0Collections uint64
NumberGen1Collections uint64
NumberGen2Collections uint64
NumberInducedGC uint64
NumberofPinnedObjects uint64
NumberofSinkBlocksinuse uint64
NumberTotalcommittedBytes uint64
NumberTotalreservedBytes uint64
PercentTimeinGC uint32
ProcessID uint64
PromotedFinalizationMemoryfromGen0 uint64
PromotedMemoryfromGen0 uint64
@@ -157,7 +151,7 @@ type Win32_PerfRawData_NETFramework_NETCLRMemory struct {
func (c *NETFramework_NETCLRMemoryCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRMemory
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -299,7 +293,7 @@ func (c *NETFramework_NETCLRMemoryCollector) collect(ch chan<- prometheus.Metric
ch <- prometheus.MustNewConstMetric(
c.TimeinGC,
prometheus.GaugeValue,
float64(100*process.PercentTimeinGC)/float64(process.PercentTimeinGC_base),
float64(process.PercentTimeinGC)/float64(process.Frequency_PerfTime),
process.Name,
)
}
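The comment in the struct above describes the PERF_RAW_FRACTION counter type, where the displayed percentage is 100 * CounterValue / BaseValue; the newer code applies exactly this formula with PercentTimeinGC_base as the base. A quick worked example with made-up counter values:

// Worked example of the PERF_RAW_FRACTION formula used for TimeinGC:
// percentage = 100 * CounterValue / BaseValue.
package main

import "fmt"

func main() {
	var percentTimeinGC uint32 = 1234567
	var percentTimeinGCBase uint32 = 12345670

	pct := float64(100*percentTimeinGC) / float64(percentTimeinGCBase)
	fmt.Printf("time in GC: %.1f%%\n", pct) // 10.0%
}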

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkNETFrameworkNETCLRMemoryCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newNETFramework_NETCLRMemoryCollector)
}

View File

@@ -1,19 +1,19 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("netframework_clrremoting", NewNETFramework_NETCLRRemotingCollector)
}
// A NETFramework_NETCLRRemotingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics
type NETFramework_NETCLRRemotingCollector struct {
logger log.Logger
Channels *prometheus.Desc
ContextBoundClassesLoaded *prometheus.Desc
ContextBoundObjects *prometheus.Desc
@@ -22,11 +22,10 @@ type NETFramework_NETCLRRemotingCollector struct {
TotalRemoteCalls *prometheus.Desc
}
// newNETFramework_NETCLRRemotingCollector ...
func newNETFramework_NETCLRRemotingCollector(logger log.Logger) (Collector, error) {
// NewNETFramework_NETCLRRemotingCollector ...
func NewNETFramework_NETCLRRemotingCollector() (Collector, error) {
const subsystem = "netframework_clrremoting"
return &NETFramework_NETCLRRemotingCollector{
logger: log.With(logger, "collector", subsystem),
Channels: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "channels_total"),
"Displays the total number of remoting channels registered across all application domains since application started.",
@@ -70,7 +69,7 @@ func newNETFramework_NETCLRRemotingCollector(logger log.Logger) (Collector, erro
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrremoting metrics", "desc", desc, "err", err)
log.Error("failed collecting win32_perfrawdata_netframework_netclrremoting metrics:", desc, err)
return err
}
return nil
@@ -90,7 +89,7 @@ type Win32_PerfRawData_NETFramework_NETCLRRemoting struct {
func (c *NETFramework_NETCLRRemotingCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkNETFrameworkNETCLRRemotingCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newNETFramework_NETCLRRemotingCollector)
}

View File

@@ -1,30 +1,29 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("netframework_clrsecurity", NewNETFramework_NETCLRSecurityCollector)
}
// A NETFramework_NETCLRSecurityCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics
type NETFramework_NETCLRSecurityCollector struct {
logger log.Logger
NumberLinkTimeChecks *prometheus.Desc
TimeinRTchecks *prometheus.Desc
StackWalkDepth *prometheus.Desc
TotalRuntimeChecks *prometheus.Desc
}
// newNETFramework_NETCLRSecurityCollector ...
func newNETFramework_NETCLRSecurityCollector(logger log.Logger) (Collector, error) {
// NewNETFramework_NETCLRSecurityCollector ...
func NewNETFramework_NETCLRSecurityCollector() (Collector, error) {
const subsystem = "netframework_clrsecurity"
return &NETFramework_NETCLRSecurityCollector{
logger: log.With(logger, "collector", subsystem),
NumberLinkTimeChecks: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "link_time_checks_total"),
"Displays the total number of link-time code access security checks since the application started.",
@@ -56,7 +55,7 @@ func newNETFramework_NETCLRSecurityCollector(logger log.Logger) (Collector, erro
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics", "desc", desc, "err", err)
log.Error("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics:", desc, err)
return err
}
return nil
@@ -75,7 +74,7 @@ type Win32_PerfRawData_NETFramework_NETCLRSecurity struct {
func (c *NETFramework_NETCLRSecurityCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity
q := queryAll(&dst, c.logger)
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -1,10 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkNETFrameworkNETCLRSecurityCollector(b *testing.B) {
// No context name required as collector source is WMI
benchmarkCollector(b, "", newNETFramework_NETCLRSecurityCollector)
}

View File

@@ -1,425 +0,0 @@
package collector
import (
"fmt"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A npsCollector is a Prometheus collector for WMI Win32_PerfRawData_IAS_NPSAuthenticationServer and Win32_PerfRawData_IAS_NPSAccountingServer metrics
type npsCollector struct {
logger log.Logger
AccessAccepts *prometheus.Desc
AccessChallenges *prometheus.Desc
AccessRejects *prometheus.Desc
AccessRequests *prometheus.Desc
AccessBadAuthenticators *prometheus.Desc
AccessDroppedPackets *prometheus.Desc
AccessInvalidRequests *prometheus.Desc
AccessMalformedPackets *prometheus.Desc
AccessPacketsReceived *prometheus.Desc
AccessPacketsSent *prometheus.Desc
AccessServerResetTime *prometheus.Desc
AccessServerUpTime *prometheus.Desc
AccessUnknownType *prometheus.Desc
AccountingRequests *prometheus.Desc
AccountingResponses *prometheus.Desc
AccountingBadAuthenticators *prometheus.Desc
AccountingDroppedPackets *prometheus.Desc
AccountingInvalidRequests *prometheus.Desc
AccountingMalformedPackets *prometheus.Desc
AccountingNoRecord *prometheus.Desc
AccountingPacketsReceived *prometheus.Desc
AccountingPacketsSent *prometheus.Desc
AccountingServerResetTime *prometheus.Desc
AccountingServerUpTime *prometheus.Desc
AccountingUnknownType *prometheus.Desc
}
func newNPSCollector(logger log.Logger) (Collector, error) {
const subsystem = "nps"
logger = log.With(logger, "collector", subsystem)
return &npsCollector{
logger: logger,
AccessAccepts: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_accepts"),
"(AccessAccepts)",
nil,
nil,
),
AccessChallenges: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_challenges"),
"(AccessChallenges)",
nil,
nil,
),
AccessRejects: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_rejects"),
"(AccessRejects)",
nil,
nil,
),
AccessRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_requests"),
"(AccessRequests)",
nil,
nil,
),
AccessBadAuthenticators: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_bad_authenticators"),
"(BadAuthenticators)",
nil,
nil,
),
AccessDroppedPackets: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_dropped_packets"),
"(DroppedPackets)",
nil,
nil,
),
AccessInvalidRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_invalid_requests"),
"(InvalidRequests)",
nil,
nil,
),
AccessMalformedPackets: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_malformed_packets"),
"(MalformedPackets)",
nil,
nil,
),
AccessPacketsReceived: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_packets_received"),
"(PacketsReceived)",
nil,
nil,
),
AccessPacketsSent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_packets_sent"),
"(PacketsSent)",
nil,
nil,
),
AccessServerResetTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_server_reset_time"),
"(ServerResetTime)",
nil,
nil,
),
AccessServerUpTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_server_up_time"),
"(ServerUpTime)",
nil,
nil,
),
AccessUnknownType: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "access_unknown_type"),
"(UnknownType)",
nil,
nil,
),
AccountingRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_requests"),
"(AccountingRequests)",
nil,
nil,
),
AccountingResponses: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_responses"),
"(AccountingResponses)",
nil,
nil,
),
AccountingBadAuthenticators: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_bad_authenticators"),
"(BadAuthenticators)",
nil,
nil,
),
AccountingDroppedPackets: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_dropped_packets"),
"(DroppedPackets)",
nil,
nil,
),
AccountingInvalidRequests: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_invalid_requests"),
"(InvalidRequests)",
nil,
nil,
),
AccountingMalformedPackets: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_malformed_packets"),
"(MalformedPackets)",
nil,
nil,
),
AccountingNoRecord: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_no_record"),
"(NoRecord)",
nil,
nil,
),
AccountingPacketsReceived: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_packets_received"),
"(PacketsReceived)",
nil,
nil,
),
AccountingPacketsSent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_packets_sent"),
"(PacketsSent)",
nil,
nil,
),
AccountingServerResetTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_server_reset_time"),
"(ServerResetTime)",
nil,
nil,
),
AccountingServerUpTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_server_up_time"),
"(ServerUpTime)",
nil,
nil,
),
AccountingUnknownType: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "accounting_unknown_type"),
"(UnknownType)",
nil,
nil,
),
}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *npsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.CollectAccept(ch); err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("failed collecting NPS accept data: %s %v", desc, err))
return err
}
if desc, err := c.CollectAccounting(ch); err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("failed collecting NPS accounting data: %s %v", desc, err))
return err
}
return nil
}
// Win32_PerfRawData_IAS_NPSAuthenticationServer docs:
// - no Microsoft documentation is available for this class at the moment
type Win32_PerfRawData_IAS_NPSAuthenticationServer struct {
Name string
AccessAccepts uint32
AccessChallenges uint32
AccessRejects uint32
AccessRequests uint32
AccessBadAuthenticators uint32
AccessDroppedPackets uint32
AccessInvalidRequests uint32
AccessMalformedPackets uint32
AccessPacketsReceived uint32
AccessPacketsSent uint32
AccessServerResetTime uint32
AccessServerUpTime uint32
AccessUnknownType uint32
}
type Win32_PerfRawData_IAS_NPSAccountingServer struct {
Name string
AccountingRequests uint32
AccountingResponses uint32
AccountingBadAuthenticators uint32
AccountingDroppedPackets uint32
AccountingInvalidRequests uint32
AccountingMalformedPackets uint32
AccountingNoRecord uint32
AccountingPacketsReceived uint32
AccountingPacketsSent uint32
AccountingServerResetTime uint32
AccountingServerUpTime uint32
AccountingUnknownType uint32
}
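// Since Microsoft does not document these classes, a quick way to inspect the
// raw counters is an ad-hoc query through the same StackExchange/wmi package
// used below; a minimal sketch (the printed fields are an arbitrary subset):
//
//	var dst []Win32_PerfRawData_IAS_NPSAuthenticationServer
//	q := wmi.CreateQuery(&dst, "") // SELECT * FROM Win32_PerfRawData_IAS_NPSAuthenticationServer
//	if err := wmi.Query(q, &dst); err != nil {
//		// handle error
//	}
//	for _, d := range dst {
//		fmt.Printf("%s: requests=%d accepts=%d\n", d.Name, d.AccessRequests, d.AccessAccepts)
//	}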
// CollectAccept sends the authentication-server metric values
// to the provided prometheus Metric channel.
func (c *npsCollector) CollectAccept(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_IAS_NPSAuthenticationServer
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, fmt.Errorf("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.AccessAccepts,
prometheus.CounterValue,
float64(dst[0].AccessAccepts),
)
ch <- prometheus.MustNewConstMetric(
c.AccessChallenges,
prometheus.CounterValue,
float64(dst[0].AccessChallenges),
)
ch <- prometheus.MustNewConstMetric(
c.AccessRejects,
prometheus.CounterValue,
float64(dst[0].AccessRejects),
)
ch <- prometheus.MustNewConstMetric(
c.AccessRequests,
prometheus.CounterValue,
float64(dst[0].AccessRequests),
)
ch <- prometheus.MustNewConstMetric(
c.AccessBadAuthenticators,
prometheus.CounterValue,
float64(dst[0].AccessBadAuthenticators),
)
ch <- prometheus.MustNewConstMetric(
c.AccessDroppedPackets,
prometheus.CounterValue,
float64(dst[0].AccessDroppedPackets),
)
ch <- prometheus.MustNewConstMetric(
c.AccessInvalidRequests,
prometheus.CounterValue,
float64(dst[0].AccessInvalidRequests),
)
ch <- prometheus.MustNewConstMetric(
c.AccessMalformedPackets,
prometheus.CounterValue,
float64(dst[0].AccessMalformedPackets),
)
ch <- prometheus.MustNewConstMetric(
c.AccessPacketsReceived,
prometheus.CounterValue,
float64(dst[0].AccessPacketsReceived),
)
ch <- prometheus.MustNewConstMetric(
c.AccessPacketsSent,
prometheus.CounterValue,
float64(dst[0].AccessPacketsSent),
)
ch <- prometheus.MustNewConstMetric(
c.AccessServerResetTime,
prometheus.CounterValue,
float64(dst[0].AccessServerResetTime),
)
ch <- prometheus.MustNewConstMetric(
c.AccessServerUpTime,
prometheus.CounterValue,
float64(dst[0].AccessServerUpTime),
)
ch <- prometheus.MustNewConstMetric(
c.AccessUnknownType,
prometheus.CounterValue,
float64(dst[0].AccessUnknownType),
)
return nil, nil
}
func (c *npsCollector) CollectAccounting(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_IAS_NPSAccountingServer
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, fmt.Errorf("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.AccountingRequests,
prometheus.CounterValue,
float64(dst[0].AccountingRequests),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingResponses,
prometheus.CounterValue,
float64(dst[0].AccountingResponses),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingBadAuthenticators,
prometheus.CounterValue,
float64(dst[0].AccountingBadAuthenticators),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingDroppedPackets,
prometheus.CounterValue,
float64(dst[0].AccountingDroppedPackets),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingInvalidRequests,
prometheus.CounterValue,
float64(dst[0].AccountingInvalidRequests),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingMalformedPackets,
prometheus.CounterValue,
float64(dst[0].AccountingMalformedPackets),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingNoRecord,
prometheus.CounterValue,
float64(dst[0].AccountingNoRecord),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingPacketsReceived,
prometheus.CounterValue,
float64(dst[0].AccountingPacketsReceived),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingPacketsSent,
prometheus.CounterValue,
float64(dst[0].AccountingPacketsSent),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingServerResetTime,
prometheus.CounterValue,
float64(dst[0].AccountingServerResetTime),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingServerUpTime,
prometheus.CounterValue,
float64(dst[0].AccountingServerUpTime),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingUnknownType,
prometheus.CounterValue,
float64(dst[0].AccountingUnknownType),
)
return nil, nil
}

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkNPSCollector(b *testing.B) {
benchmarkCollector(b, "nps", newNPSCollector)
}

View File

@@ -1,27 +1,22 @@
//go:build windows
// +build windows
package collector
import (
"fmt"
"os"
"strings"
"errors"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/headers/netapi32"
"github.com/prometheus-community/windows_exporter/headers/psapi"
"github.com/prometheus-community/windows_exporter/headers/sysinfoapi"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows/registry"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("os", NewOSCollector)
}
// A OSCollector is a Prometheus collector for WMI metrics
type OSCollector struct {
logger log.Logger
OSInformation *prometheus.Desc
PhysicalMemoryFreeBytes *prometheus.Desc
PagingFreeBytes *prometheus.Desc
@@ -37,23 +32,15 @@ type OSCollector struct {
Timezone *prometheus.Desc
}
type pagingFileCounter struct {
Name string
Usage float64 `perflib:"% Usage"`
UsagePeak float64 `perflib:"% Usage Peak"`
}
// newOSCollector ...
func newOSCollector(logger log.Logger) (Collector, error) {
// NewOSCollector ...
func NewOSCollector() (Collector, error) {
const subsystem = "os"
return &OSCollector{
logger: log.With(logger, "collector", subsystem),
OSInformation: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "info"),
"OperatingSystem.Caption, OperatingSystem.Version",
[]string{"product", "version", "major_version", "minor_version", "build_number"},
[]string{"product", "version"},
nil,
),
PagingLimitBytes: prometheus.NewDesc(
@@ -99,7 +86,7 @@ func newOSCollector(logger log.Logger) (Collector, error) {
nil,
),
ProcessMemoryLimitBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "process_memory_limit_bytes"),
prometheus.BuildFQName(Namespace, subsystem, "process_memory_limix_bytes"),
"OperatingSystem.MaxProcessMemorySize",
nil,
nil,
@@ -134,8 +121,8 @@ func newOSCollector(logger log.Logger) (Collector, error) {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting os metrics", "desc", desc, "err", err)
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting os metrics:", desc, err)
return err
}
return nil
@@ -159,103 +146,41 @@ type Win32_OperatingSystem struct {
Version string
}
func (c *OSCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
nwgi, err := netapi32.GetWorkstationInfo()
if err != nil {
func (c *OSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_OperatingSystem
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
gmse, err := sysinfoapi.GlobalMemoryStatusEx()
if err != nil {
return nil, err
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
currentTime := time.Now()
timezoneName, _ := currentTime.Zone()
// Get total allocation of paging files across all disks.
memManKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Control\Session Manager\Memory Management`, registry.QUERY_VALUE)
defer memManKey.Close()
if err != nil {
return nil, err
}
pagingFiles, _, pagingErr := memManKey.GetStringsValue("ExistingPageFiles")
// Get build number and product name from registry
ntKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
defer ntKey.Close()
if err != nil {
return nil, err
}
pn, _, err := ntKey.GetStringValue("ProductName")
if err != nil {
return nil, err
}
bn, _, err := ntKey.GetStringValue("CurrentBuildNumber")
if err != nil {
return nil, err
}
var fsipf float64
for _, pagingFile := range pagingFiles {
fileString := strings.ReplaceAll(pagingFile, `\??\`, "")
file, err := os.Stat(fileString)
// For unknown reasons, Windows doesn't always create a page file. Continue collection rather than aborting.
if err != nil {
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("Failed to read page file (reason: %s): %s\n", err, fileString))
} else {
fsipf += float64(file.Size())
}
}
gpi, err := psapi.GetPerformanceInfo()
if err != nil {
return nil, err
}
var pfc = make([]pagingFileCounter, 0)
if err := unmarshalObject(ctx.perfObjects["Paging File"], &pfc, c.logger); err != nil {
return nil, err
}
// Get current page file usage.
var pfbRaw float64
for _, pageFile := range pfc {
if strings.Contains(strings.ToLower(pageFile.Name), "_total") {
continue
}
pfbRaw += pageFile.Usage
}
// Subtract from total page file allocation on disk.
pfb := fsipf - (pfbRaw * float64(gpi.PageSize))
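// pfb is the free page file space in bytes: the total on-disk allocation (fsipf)
// minus the used page count (pfbRaw) scaled by the system page size.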
ch <- prometheus.MustNewConstMetric(
c.OSInformation,
prometheus.GaugeValue,
1.0,
fmt.Sprintf("Microsoft %s", pn), // Caption
fmt.Sprintf("%d.%d.%s", nwgi.VersionMajor, nwgi.VersionMinor, bn), // Version
fmt.Sprintf("%d", nwgi.VersionMajor), // Major Version
fmt.Sprintf("%d", nwgi.VersionMinor), // Minor Version
bn, // Build number
dst[0].Caption,
dst[0].Version,
)
ch <- prometheus.MustNewConstMetric(
c.PhysicalMemoryFreeBytes,
prometheus.GaugeValue,
float64(gmse.AvailPhys),
float64(dst[0].FreePhysicalMemory*1024), // KiB -> bytes
)
time := dst[0].LocalDateTime
ch <- prometheus.MustNewConstMetric(
c.Time,
prometheus.GaugeValue,
float64(currentTime.Unix()),
float64(time.Unix()),
)
timezoneName, _ := time.Zone()
ch <- prometheus.MustNewConstMetric(
c.Timezone,
prometheus.GaugeValue,
@@ -263,64 +188,58 @@ func (c *OSCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (
timezoneName,
)
if pagingErr == nil {
ch <- prometheus.MustNewConstMetric(
c.PagingFreeBytes,
prometheus.GaugeValue,
pfb,
)
ch <- prometheus.MustNewConstMetric(
c.PagingFreeBytes,
prometheus.GaugeValue,
float64(dst[0].FreeSpaceInPagingFiles*1024), // KiB -> bytes
)
ch <- prometheus.MustNewConstMetric(
c.PagingLimitBytes,
prometheus.GaugeValue,
fsipf,
)
} else {
_ = level.Debug(c.logger).Log("Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
}
ch <- prometheus.MustNewConstMetric(
c.VirtualMemoryFreeBytes,
prometheus.GaugeValue,
float64(gmse.AvailPageFile),
float64(dst[0].FreeVirtualMemory*1024), // KiB -> bytes
)
// Windows has no defined limit; the effective maximum depends on available resources. WMI does not calculate this, so a default value is reported.
// https://techcommunity.microsoft.com/t5/windows-blog-archive/pushing-the-limits-of-windows-processes-and-threads/ba-p/723824
// https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-operatingsystem
ch <- prometheus.MustNewConstMetric(
c.ProcessesLimit,
prometheus.GaugeValue,
float64(4294967295),
float64(dst[0].MaxNumberOfProcesses),
)
ch <- prometheus.MustNewConstMetric(
c.ProcessMemoryLimitBytes,
prometheus.GaugeValue,
float64(gmse.TotalVirtual),
float64(dst[0].MaxProcessMemorySize*1024), // KiB -> bytes
)
ch <- prometheus.MustNewConstMetric(
c.Processes,
prometheus.GaugeValue,
float64(gpi.ProcessCount),
float64(dst[0].NumberOfProcesses),
)
ch <- prometheus.MustNewConstMetric(
c.Users,
prometheus.GaugeValue,
float64(nwgi.LoggedOnUsers),
float64(dst[0].NumberOfUsers),
)
ch <- prometheus.MustNewConstMetric(
c.PagingLimitBytes,
prometheus.GaugeValue,
float64(dst[0].SizeStoredInPagingFiles*1024), // KiB -> bytes
)
ch <- prometheus.MustNewConstMetric(
c.VirtualMemoryBytes,
prometheus.GaugeValue,
float64(gmse.TotalPageFile),
float64(dst[0].TotalVirtualMemorySize*1024), // KiB -> bytes
)
ch <- prometheus.MustNewConstMetric(
c.VisibleMemoryBytes,
prometheus.GaugeValue,
float64(gmse.TotalPhys),
float64(dst[0].TotalVisibleMemorySize*1024), // KiB -> bytes
)
return nil, nil

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkOSCollector(b *testing.B) {
benchmarkCollector(b, "os", newOSCollector)
}

View File

@@ -4,16 +4,16 @@ import (
"fmt"
"reflect"
"strconv"
"strings"
"github.com/prometheus-community/windows_exporter/perflib"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
perflibCollector "github.com/leoluk/perflib_exporter/collector"
"github.com/leoluk/perflib_exporter/perflib"
"github.com/prometheus/common/log"
)
var nametable = perflib.QueryNameTable("Counter 009") // Reads the names in English. TODO: validate that the English names are always present.
func MapCounterToIndex(name string) string {
return strconv.Itoa(int(perflib.CounterNameTable.LookupIndex(name)))
return strconv.Itoa(int(nametable.LookupIndex(name)))
}
func getPerflibSnapshot(objNames string) (map[string]*perflib.PerfObject, error) {
@@ -29,7 +29,7 @@ func getPerflibSnapshot(objNames string) (map[string]*perflib.PerfObject, error)
return indexed, nil
}
func unmarshalObject(obj *perflib.PerfObject, vs interface{}, logger log.Logger) error {
func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
if obj == nil {
return fmt.Errorf("counter not found")
}
@@ -67,20 +67,10 @@ func unmarshalObject(obj *perflib.PerfObject, vs interface{}, logger log.Logger)
if tag == "" {
continue
}
secondValue := false
st := strings.Split(tag, ",")
tag = st[0]
for _, t := range st {
if t == "secondvalue" {
secondValue = true
}
}
ctr, found := counters[tag]
if !found {
_ = level.Debug(logger).Log("msg", fmt.Sprintf("missing counter %q, have %v", tag, counterMapKeys(counters)))
log.Debugf("missing counter %q, have %v", tag, counterMapKeys(counters))
continue
}
if !target.Field(i).CanSet() {
@@ -90,18 +80,10 @@ func unmarshalObject(obj *perflib.PerfObject, vs interface{}, logger log.Logger)
return fmt.Errorf("tagged field %v has wrong type %v, must be float64", f.Name, fieldType)
}
if secondValue {
if !ctr.Def.HasSecondValue {
return fmt.Errorf("tagged field %v expected a SecondValue, which was not present", f.Name)
}
target.Field(i).SetFloat(float64(ctr.SecondValue))
continue
}
switch ctr.Def.CounterType {
case perflib.PERF_ELAPSED_TIME:
case perflibCollector.PERF_ELAPSED_TIME:
target.Field(i).SetFloat(float64(ctr.Value-windowsEpoch) / float64(obj.Frequency))
case perflib.PERF_100NSEC_TIMER, perflib.PERF_PRECISION_100NS_TIMER:
case perflibCollector.PERF_100NSEC_TIMER, perflibCollector.PERF_PRECISION_100NS_TIMER:
target.Field(i).SetFloat(float64(ctr.Value) * ticksToSecondsScaleFactor)
default:
target.Field(i).SetFloat(float64(ctr.Value))
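// For reference, the struct tags this unmarshaller consumes look as follows
// (the type and counter names are illustrative); on the pre-change side of this
// diff, the optional ",secondvalue" suffix selects ctr.SecondValue instead of
// ctr.Value for counters that carry two values:
//
//	type example struct {
//		Usage     float64 `perflib:"% Usage"`                  // reads ctr.Value
//		UsagePeak float64 `perflib:"% Usage Peak,secondvalue"` // reads ctr.SecondValue
//	}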

View File

@@ -4,15 +4,13 @@ import (
"reflect"
"testing"
"github.com/prometheus-community/windows_exporter/perflib"
"github.com/go-kit/log"
perflibCollector "github.com/leoluk/perflib_exporter/collector"
"github.com/leoluk/perflib_exporter/perflib"
)
type simple struct {
ValA float64 `perflib:"Something"`
ValB float64 `perflib:"Something Else"`
ValC float64 `perflib:"Something Else,secondvalue"`
}
func TestUnmarshalPerflib(t *testing.T) {
@@ -38,7 +36,7 @@ func TestUnmarshalPerflib(t *testing.T) {
{
Def: &perflib.PerfCounterDef{
Name: "Something",
CounterType: perflib.PERF_COUNTER_COUNTER,
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
},
Value: 123,
},
@@ -58,24 +56,22 @@ func TestUnmarshalPerflib(t *testing.T) {
{
Def: &perflib.PerfCounterDef{
Name: "Something",
CounterType: perflib.PERF_COUNTER_COUNTER,
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
},
Value: 123,
},
{
Def: &perflib.PerfCounterDef{
Name: "Something Else",
CounterType: perflib.PERF_COUNTER_COUNTER,
HasSecondValue: true,
Name: "Something Else",
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
},
Value: 256,
SecondValue: 222,
Value: 256,
},
},
},
},
},
expectedOutput: []simple{{ValA: 123, ValB: 256, ValC: 222}},
expectedOutput: []simple{{ValA: 123, ValB: 256}},
expectError: false,
},
{
@@ -87,7 +83,7 @@ func TestUnmarshalPerflib(t *testing.T) {
{
Def: &perflib.PerfCounterDef{
Name: "Something",
CounterType: perflib.PERF_COUNTER_COUNTER,
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
},
Value: 321,
},
@@ -98,7 +94,7 @@ func TestUnmarshalPerflib(t *testing.T) {
{
Def: &perflib.PerfCounterDef{
Name: "Something",
CounterType: perflib.PERF_COUNTER_COUNTER,
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
},
Value: 231,
},
@@ -113,7 +109,7 @@ func TestUnmarshalPerflib(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
output := make([]simple, 0)
err := unmarshalObject(c.obj, &output, log.NewNopLogger())
err := unmarshalObject(c.obj, &output)
if err != nil && !c.expectError {
t.Errorf("Did not expect error, got %q", err)
}

View File

@@ -1,44 +1,30 @@
//go:build windows
// +build windows
package collector
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
FlagProcessOldExclude = "collector.process.blacklist"
FlagProcessOldInclude = "collector.process.whitelist"
FlagProcessExclude = "collector.process.exclude"
FlagProcessInclude = "collector.process.include"
)
func init() {
registerCollector("process", NewProcessCollector)
}
var (
processOldInclude *string
processOldExclude *string
processInclude *string
processExclude *string
processIncludeSet bool
processExcludeSet bool
processWhereClause = kingpin.Flag(
"collector.process.processes-where",
"WQL 'where' clause to use in WMI metrics query. Limits the response to the processes you specify and reduces the size of the response.",
).Default("").String()
)
type processCollector struct {
logger log.Logger
// A ProcessCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfProc_Process metrics
type ProcessCollector struct {
StartTime *prometheus.Desc
CPUTimeTotal *prometheus.Desc
HandleCount *prometheus.Desc
@@ -51,70 +37,20 @@ type processCollector struct {
PrivateBytes *prometheus.Desc
ThreadCount *prometheus.Desc
VirtualBytes *prometheus.Desc
WorkingSetPrivate *prometheus.Desc
WorkingSetPeak *prometheus.Desc
WorkingSet *prometheus.Desc
processIncludePattern *regexp.Regexp
processExcludePattern *regexp.Regexp
}
// newProcessCollectorFlags ...
func newProcessCollectorFlags(app *kingpin.Application) {
processInclude = app.Flag(
FlagProcessInclude,
"Regexp of processes to include. Process name must both match include and not match exclude to be included.",
).Default(".*").PreAction(func(c *kingpin.ParseContext) error {
processIncludeSet = true
return nil
}).String()
processExclude = app.Flag(
FlagProcessExclude,
"Regexp of processes to exclude. Process name must both match include and not match exclude to be included.",
).Default("").PreAction(func(c *kingpin.ParseContext) error {
processExcludeSet = true
return nil
}).String()
processOldInclude = app.Flag(
FlagProcessOldInclude,
"DEPRECATED: Use --collector.process.include",
).Hidden().String()
processOldExclude = app.Flag(
FlagProcessOldExclude,
"DEPRECATED: Use --collector.process.exclude",
).Hidden().String()
queryWhereClause string
}
// NewProcessCollector ...
func newProcessCollector(logger log.Logger) (Collector, error) {
func NewProcessCollector() (Collector, error) {
const subsystem = "process"
logger = log.With(logger, "collector", subsystem)
if *processOldExclude != "" {
if !processExcludeSet {
_ = level.Warn(logger).Log("msg", "--collector.process.blacklist is DEPRECATED and will be removed in a future release, use --collector.process.exclude")
*processExclude = *processOldExclude
} else {
return nil, errors.New("--collector.process.blacklist and --collector.process.exclude are mutually exclusive")
}
}
if *processOldInclude != "" {
if !processIncludeSet {
_ = level.Warn(logger).Log("msg", "--collector.process.whitelist is DEPRECATED and will be removed in a future release, use --collector.process.include")
*processInclude = *processOldInclude
} else {
return nil, errors.New("--collector.process.whitelist and --collector.process.include are mutually exclusive")
}
if *processWhereClause == "" {
log.Warn("No where-clause specified for process collector. This will generate a very large number of metrics!")
}
if *processInclude == ".*" && *processExclude == "" {
_ = level.Warn(logger).Log("msg", "No filters specified for process collector. This will generate a very large number of metrics!")
}
return &processCollector{
logger: logger,
return &ProcessCollector{
StartTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "start_time"),
"Time of process start.",
@@ -123,43 +59,43 @@ func newProcessCollector(logger log.Logger) (Collector, error) {
),
CPUTimeTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cpu_time_total"),
"Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user).",
"Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user). An instruction is the basic unit of execution in a computer, a thread is the object that executes instructions, and a process is the object created when a program is run. Code executed to handle some hardware interrupts and trap conditions is included in this count.",
[]string{"process", "process_id", "creating_process_id", "mode"},
nil,
),
HandleCount: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "handles"),
prometheus.BuildFQName(Namespace, subsystem, "handle_count"),
"Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.",
[]string{"process", "process_id", "creating_process_id"},
nil,
),
IOBytesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "io_bytes_total"),
"Bytes issued to I/O operations in different modes (read, write, other).",
"Bytes issued to I/O operations in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. ",
[]string{"process", "process_id", "creating_process_id", "mode"},
nil,
),
IOOperationsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "io_operations_total"),
"I/O operations issued in different modes (read, write, other).",
"I/O operations issued in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. ",
[]string{"process", "process_id", "creating_process_id", "mode"},
nil,
),
PageFaultsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "page_faults_total"),
"Page faults by the threads executing in this process.",
"Page faults by the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. This can cause the page not to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with which the page is shared.",
[]string{"process", "process_id", "creating_process_id"},
nil,
),
PageFileBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "page_file_bytes"),
"Current number of bytes this process has used in the paging file(s).",
"Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.",
[]string{"process", "process_id", "creating_process_id"},
nil,
),
PoolBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pool_bytes"),
"Pool Bytes is the last observed number of bytes in the paged or nonpaged pool.",
"Pool Bytes is the last observed number of bytes in the paged or nonpaged pool. The nonpaged pool is an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. The paged pool is an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. Nonpaged pool bytes is calculated differently than paged pool bytes, so it might not equal the total of paged pool bytes.",
[]string{"process", "process_id", "creating_process_id", "pool"},
nil,
),
@@ -176,94 +112,94 @@ func newProcessCollector(logger log.Logger) (Collector, error) {
nil,
),
ThreadCount: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "threads"),
"Number of threads currently active in this process.",
prometheus.BuildFQName(Namespace, subsystem, "thread_count"),
"Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.",
[]string{"process", "process_id", "creating_process_id"},
nil,
),
VirtualBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "virtual_bytes"),
"Current size, in bytes, of the virtual address space that the process is using.",
[]string{"process", "process_id", "creating_process_id"},
nil,
),
WorkingSetPrivate: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "working_set_private_bytes"),
"Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes.",
[]string{"process", "process_id", "creating_process_id"},
nil,
),
WorkingSetPeak: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "working_set_peak_bytes"),
"Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process.",
"Current size, in bytes, of the virtual address space that the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries.",
[]string{"process", "process_id", "creating_process_id"},
nil,
),
WorkingSet: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "working_set_bytes"),
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process.",
prometheus.BuildFQName(Namespace, subsystem, "working_set"),
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.",
[]string{"process", "process_id", "creating_process_id"},
nil,
),
processIncludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *processInclude)),
processExcludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *processExclude)),
queryWhereClause: *processWhereClause,
}, nil
}
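// The include/exclude patterns above are wrapped in ^(?:...)$ before compiling,
// so flag values always match whole process names; a sketch of the resulting
// filter (process names are illustrative):
//
//	include := regexp.MustCompile("^(?:firefox|chrome)$")
//	exclude := regexp.MustCompile("^(?:chrome)$")
//	keep := include.MatchString("firefox") && !exclude.MatchString("firefox") // true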
type perflibProcess struct {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *ProcessCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting process metrics:", desc, err)
return err
}
return nil
}
// Win32_PerfRawData_PerfProc_Process docs:
// - https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx
type Win32_PerfRawData_PerfProc_Process struct {
Name string
PercentProcessorTime float64 `perflib:"% Processor Time"`
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
PercentUserTime float64 `perflib:"% User Time"`
CreatingProcessID float64 `perflib:"Creating Process ID"`
ElapsedTime float64 `perflib:"Elapsed Time"`
HandleCount float64 `perflib:"Handle Count"`
IDProcess float64 `perflib:"ID Process"`
IODataBytesPerSec float64 `perflib:"IO Data Bytes/sec"`
IODataOperationsPerSec float64 `perflib:"IO Data Operations/sec"`
IOOtherBytesPerSec float64 `perflib:"IO Other Bytes/sec"`
IOOtherOperationsPerSec float64 `perflib:"IO Other Operations/sec"`
IOReadBytesPerSec float64 `perflib:"IO Read Bytes/sec"`
IOReadOperationsPerSec float64 `perflib:"IO Read Operations/sec"`
IOWriteBytesPerSec float64 `perflib:"IO Write Bytes/sec"`
IOWriteOperationsPerSec float64 `perflib:"IO Write Operations/sec"`
PageFaultsPerSec float64 `perflib:"Page Faults/sec"`
PageFileBytesPeak float64 `perflib:"Page File Bytes Peak"`
PageFileBytes float64 `perflib:"Page File Bytes"`
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
PriorityBase float64 `perflib:"Priority Base"`
PrivateBytes float64 `perflib:"Private Bytes"`
ThreadCount float64 `perflib:"Thread Count"`
VirtualBytesPeak float64 `perflib:"Virtual Bytes Peak"`
VirtualBytes float64 `perflib:"Virtual Bytes"`
WorkingSetPrivate float64 `perflib:"Working Set - Private"`
WorkingSetPeak float64 `perflib:"Working Set Peak"`
WorkingSet float64 `perflib:"Working Set"`
CreatingProcessID uint32
ElapsedTime uint64
Frequency_Object uint64
HandleCount uint32
IDProcess uint32
IODataBytesPersec uint64
IODataOperationsPersec uint64
IOOtherBytesPersec uint64
IOOtherOperationsPersec uint64
IOReadBytesPersec uint64
IOReadOperationsPersec uint64
IOWriteBytesPersec uint64
IOWriteOperationsPersec uint64
PageFaultsPersec uint32
PageFileBytes uint64
PageFileBytesPeak uint64
PercentPrivilegedTime uint64
PercentProcessorTime uint64
PercentUserTime uint64
PoolNonpagedBytes uint32
PoolPagedBytes uint32
PriorityBase uint32
PrivateBytes uint64
ThreadCount uint32
Timestamp_Object uint64
VirtualBytes uint64
VirtualBytesPeak uint64
WorkingSet uint64
WorkingSetPeak uint64
WorkingSetPrivate uint64
}
type WorkerProcess struct {
AppPoolName string
ProcessId uint64
ProcessId uint32
}
func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
data := make([]perflibProcess, 0)
err := unmarshalObject(ctx.perfObjects["Process"], &data, c.logger)
if err != nil {
return err
func (c *ProcessCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_PerfProc_Process
q := queryAllWhere(&dst, c.queryWhereClause)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
var dst_wp []WorkerProcess
q_wp := queryAll(&dst_wp, c.logger)
q_wp := queryAll(&dst_wp)
if err := wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration"); err != nil {
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err))
log.Debugf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err)
}
for _, process := range data {
if process.Name == "_Total" ||
c.processExcludePattern.MatchString(process.Name) ||
!c.processIncludePattern.MatchString(process.Name) {
for _, process := range dst {
if process.Name == "_Total" {
continue
}
// Duplicate processes are suffixed with '#' and an index number (e.g. "svchost#3"). Remove those.
@@ -272,7 +208,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
cpid := strconv.FormatUint(uint64(process.CreatingProcessID), 10)
for _, wp := range dst_wp {
if wp.ProcessId == uint64(process.IDProcess) {
if wp.ProcessId == process.IDProcess {
processName = strings.Join([]string{processName, wp.AppPoolName}, "_")
break
}
@@ -281,7 +217,8 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.StartTime,
prometheus.GaugeValue,
process.ElapsedTime,
// convert from Windows timestamp (1 jan 1601) to unix timestamp (1 jan 1970)
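// 116444736000000000 is the gap between the two epochs expressed in 100 ns ticks (11644473600 s * 1e7); dividing by Frequency_Object (typically 10,000,000 ticks per second) then yields Unix seconds.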
float64(process.ElapsedTime-116444736000000000)/float64(process.Frequency_Object),
processName,
pid,
cpid,
@@ -290,7 +227,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.HandleCount,
prometheus.GaugeValue,
process.HandleCount,
float64(process.HandleCount),
processName,
pid,
cpid,
@@ -299,7 +236,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.CPUTimeTotal,
prometheus.CounterValue,
process.PercentPrivilegedTime,
float64(process.PercentPrivilegedTime)*ticksToSecondsScaleFactor,
processName,
pid,
cpid,
@@ -309,7 +246,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.CPUTimeTotal,
prometheus.CounterValue,
process.PercentUserTime,
float64(process.PercentUserTime)*ticksToSecondsScaleFactor,
processName,
pid,
cpid,
@@ -319,7 +256,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.IOBytesTotal,
prometheus.CounterValue,
process.IOOtherBytesPerSec,
float64(process.IOOtherBytesPersec),
processName,
pid,
cpid,
@@ -329,7 +266,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.IOOperationsTotal,
prometheus.CounterValue,
process.IOOtherOperationsPerSec,
float64(process.IOOtherOperationsPersec),
processName,
pid,
cpid,
@@ -339,7 +276,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.IOBytesTotal,
prometheus.CounterValue,
process.IOReadBytesPerSec,
float64(process.IOReadBytesPersec),
processName,
pid,
cpid,
@@ -349,7 +286,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.IOOperationsTotal,
prometheus.CounterValue,
process.IOReadOperationsPerSec,
float64(process.IOReadOperationsPersec),
processName,
pid,
cpid,
@@ -359,7 +296,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.IOBytesTotal,
prometheus.CounterValue,
process.IOWriteBytesPerSec,
float64(process.IOWriteBytesPersec),
processName,
pid,
cpid,
@@ -369,7 +306,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.IOOperationsTotal,
prometheus.CounterValue,
process.IOWriteOperationsPerSec,
float64(process.IOWriteOperationsPersec),
processName,
pid,
cpid,
@@ -379,7 +316,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.PageFaultsTotal,
prometheus.CounterValue,
process.PageFaultsPerSec,
float64(process.PageFaultsPersec),
processName,
pid,
cpid,
@@ -388,7 +325,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.PageFileBytes,
prometheus.GaugeValue,
process.PageFileBytes,
float64(process.PageFileBytes),
processName,
pid,
cpid,
@@ -397,7 +334,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.PoolBytes,
prometheus.GaugeValue,
process.PoolNonpagedBytes,
float64(process.PoolNonpagedBytes),
processName,
pid,
cpid,
@@ -407,7 +344,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.PoolBytes,
prometheus.GaugeValue,
process.PoolPagedBytes,
float64(process.PoolPagedBytes),
processName,
pid,
cpid,
@@ -417,7 +354,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.PriorityBase,
prometheus.GaugeValue,
process.PriorityBase,
float64(process.PriorityBase),
processName,
pid,
cpid,
@@ -426,7 +363,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.PrivateBytes,
prometheus.GaugeValue,
process.PrivateBytes,
float64(process.PrivateBytes),
processName,
pid,
cpid,
@@ -435,7 +372,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.ThreadCount,
prometheus.GaugeValue,
process.ThreadCount,
float64(process.ThreadCount),
processName,
pid,
cpid,
@@ -444,25 +381,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.VirtualBytes,
prometheus.GaugeValue,
process.VirtualBytes,
processName,
pid,
cpid,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSetPrivate,
prometheus.GaugeValue,
process.WorkingSetPrivate,
processName,
pid,
cpid,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSetPeak,
prometheus.GaugeValue,
process.WorkingSetPeak,
float64(process.VirtualBytes),
processName,
pid,
cpid,
@@ -471,12 +390,12 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
ch <- prometheus.MustNewConstMetric(
c.WorkingSet,
prometheus.GaugeValue,
process.WorkingSet,
float64(process.WorkingSet),
processName,
pid,
cpid,
)
}
return nil
return nil, nil
}

View File

@@ -1,14 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkProcessCollector(b *testing.B) {
// Include is not set in testing context (kingpin flags not parsed), causing the collector to skip all processes.
localProcessInclude := ".+"
processInclude = &localProcessInclude
// No context name required as collector source is WMI
benchmarkCollector(b, "", newProcessCollector)
}

View File

@@ -1,193 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"fmt"
"sync"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
// Base metrics returned by Prometheus
var (
scrapeDurationDesc = prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "exporter", "collector_duration_seconds"),
"windows_exporter: Duration of a collection.",
[]string{"collector"},
nil,
)
scrapeSuccessDesc = prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "exporter", "collector_success"),
"windows_exporter: Whether the collector was successful.",
[]string{"collector"},
nil,
)
scrapeTimeoutDesc = prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "exporter", "collector_timeout"),
"windows_exporter: Whether the collector timed out.",
[]string{"collector"},
nil,
)
snapshotDuration = prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "exporter", "perflib_snapshot_duration_seconds"),
"Duration of perflib snapshot capture",
nil,
nil,
)
)
// Prometheus implements prometheus.Collector for a set of Windows collectors.
type Prometheus struct {
maxScrapeDuration time.Duration
collectors map[string]Collector
logger log.Logger
}
// NewPrometheus returns a new Prometheus where the set of collectors must
// return metrics within the given timeout.
func NewPrometheus(timeout time.Duration, cs map[string]Collector, logger log.Logger) *Prometheus {
return &Prometheus{
maxScrapeDuration: timeout,
collectors: cs,
logger: logger,
}
}
// Describe sends all the descriptors of the collectors included to
// the provided channel.
func (coll *Prometheus) Describe(ch chan<- *prometheus.Desc) {
ch <- scrapeDurationDesc
ch <- scrapeSuccessDesc
}
type collectorOutcome int
const (
pending collectorOutcome = iota
success
failed
)
// Collect sends the collected metrics from each of the collectors to
// prometheus.
func (coll *Prometheus) Collect(ch chan<- prometheus.Metric) {
t := time.Now()
cs := make([]string, 0, len(coll.collectors))
for name := range coll.collectors {
cs = append(cs, name)
}
scrapeContext, err := PrepareScrapeContext(cs)
ch <- prometheus.MustNewConstMetric(
snapshotDuration,
prometheus.GaugeValue,
time.Since(t).Seconds(),
)
if err != nil {
ch <- prometheus.NewInvalidMetric(scrapeSuccessDesc, fmt.Errorf("failed to prepare scrape: %v", err))
return
}
wg := sync.WaitGroup{}
wg.Add(len(coll.collectors))
collectorOutcomes := make(map[string]collectorOutcome)
for name := range coll.collectors {
collectorOutcomes[name] = pending
}
metricsBuffer := make(chan prometheus.Metric)
l := sync.Mutex{}
finished := false
go func() {
for m := range metricsBuffer {
l.Lock()
if !finished {
ch <- m
}
l.Unlock()
}
}()
for name, c := range coll.collectors {
go func(name string, c Collector) {
defer wg.Done()
outcome := execute(name, c, scrapeContext, metricsBuffer, coll.logger)
l.Lock()
if !finished {
collectorOutcomes[name] = outcome
}
l.Unlock()
}(name, c)
}
allDone := make(chan struct{})
go func() {
wg.Wait()
close(allDone)
close(metricsBuffer)
}()
// Wait until either all collectors finish, or timeout expires
select {
case <-allDone:
case <-time.After(coll.maxScrapeDuration):
}
l.Lock()
finished = true
remainingCollectorNames := make([]string, 0)
for name, outcome := range collectorOutcomes {
var successValue, timeoutValue float64
if outcome == pending {
timeoutValue = 1.0
remainingCollectorNames = append(remainingCollectorNames, name)
}
if outcome == success {
successValue = 1.0
}
ch <- prometheus.MustNewConstMetric(
scrapeSuccessDesc,
prometheus.GaugeValue,
successValue,
name,
)
ch <- prometheus.MustNewConstMetric(
scrapeTimeoutDesc,
prometheus.GaugeValue,
timeoutValue,
name,
)
}
if len(remainingCollectorNames) > 0 {
_ = level.Warn(coll.logger).Log("msg", fmt.Sprintf("Collection timed out, still waiting for %v", remainingCollectorNames))
}
l.Unlock()
}
func execute(name string, c Collector, ctx *ScrapeContext, ch chan<- prometheus.Metric, logger log.Logger) collectorOutcome {
t := time.Now()
err := c.Collect(ctx, ch)
duration := time.Since(t).Seconds()
ch <- prometheus.MustNewConstMetric(
scrapeDurationDesc,
prometheus.GaugeValue,
duration,
name,
)
if err != nil {
_ = level.Error(logger).Log("msg", fmt.Sprintf("collector %s failed after %fs", name, duration), "err", err)
return failed
}
_ = level.Debug(logger).Log("msg", fmt.Sprintf("collector %s succeeded after %fs.", name, duration))
return success
}
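// A minimal wiring sketch (registry, timeout and port are illustrative, not the
// exporter's actual startup code): the Prometheus type plugs into client_golang
// as an ordinary prometheus.Collector.
//
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(NewPrometheus(10*time.Second, collectors, logger))
//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
//	_ = http.ListenAndServe(":9182", nil)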

View File

@@ -1,349 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
// A RemoteFxNetworkCollector is a Prometheus collector for
// WMI Win32_PerfRawData_Counters_RemoteFXNetwork & Win32_PerfRawData_Counters_RemoteFXGraphics metrics
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxnetwork/
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxgraphics/
type RemoteFxCollector struct {
logger log.Logger
// net
BaseTCPRTT *prometheus.Desc
BaseUDPRTT *prometheus.Desc
CurrentTCPBandwidth *prometheus.Desc
CurrentTCPRTT *prometheus.Desc
CurrentUDPBandwidth *prometheus.Desc
CurrentUDPRTT *prometheus.Desc
TotalReceivedBytes *prometheus.Desc
TotalSentBytes *prometheus.Desc
UDPPacketsReceivedPersec *prometheus.Desc
UDPPacketsSentPersec *prometheus.Desc
//gfx
AverageEncodingTime *prometheus.Desc
FrameQuality *prometheus.Desc
FramesSkippedPerSecondInsufficientResources *prometheus.Desc
GraphicsCompressionratio *prometheus.Desc
InputFramesPerSecond *prometheus.Desc
OutputFramesPerSecond *prometheus.Desc
SourceFramesPerSecond *prometheus.Desc
}
// newRemoteFx ...
func newRemoteFx(logger log.Logger) (Collector, error) {
const subsystem = "remote_fx"
return &RemoteFxCollector{
logger: log.With(logger, "collector", subsystem),
// net
BaseTCPRTT: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_base_tcp_rtt_seconds"),
"Base TCP round-trip time (RTT) detected in seconds",
[]string{"session_name"},
nil,
),
BaseUDPRTT: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_base_udp_rtt_seconds"),
"Base UDP round-trip time (RTT) detected in seconds.",
[]string{"session_name"},
nil,
),
CurrentTCPBandwidth: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_current_tcp_bandwidth"),
"TCP Bandwidth detected in bytes per second.",
[]string{"session_name"},
nil,
),
CurrentTCPRTT: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_current_tcp_rtt_seconds"),
"Average TCP round-trip time (RTT) detected in seconds.",
[]string{"session_name"},
nil,
),
CurrentUDPBandwidth: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_current_udp_bandwidth"),
"UDP Bandwidth detected in bytes per second.",
[]string{"session_name"},
nil,
),
CurrentUDPRTT: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_current_udp_rtt_seconds"),
"Average UDP round-trip time (RTT) detected in seconds.",
[]string{"session_name"},
nil,
),
TotalReceivedBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_received_bytes_total"),
"(TotalReceivedBytes)",
[]string{"session_name"},
nil,
),
TotalSentBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_sent_bytes_total"),
"(TotalSentBytes)",
[]string{"session_name"},
nil,
),
UDPPacketsReceivedPersec: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_udp_packets_received_total"),
"Rate in packets per second at which packets are received over UDP.",
[]string{"session_name"},
nil,
),
UDPPacketsSentPersec: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_udp_packets_sent_total"),
"Rate in packets per second at which packets are sent over UDP.",
[]string{"session_name"},
nil,
),
//gfx
AverageEncodingTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_average_encoding_time_seconds"),
"Average frame encoding time in seconds",
[]string{"session_name"},
nil,
),
FrameQuality: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_frame_quality"),
"Quality of the output frame expressed as a percentage of the quality of the source frame.",
[]string{"session_name"},
nil,
),
FramesSkippedPerSecondInsufficientResources: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_frames_skipped_insufficient_resource_total"),
"Number of frames skipped per second due to insufficient client resources.",
[]string{"session_name", "resource"},
nil,
),
GraphicsCompressionratio: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_graphics_compression_ratio"),
"Ratio of the number of bytes encoded to the number of bytes input.",
[]string{"session_name"},
nil,
),
InputFramesPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_input_frames_total"),
"Number of sources frames provided as input to RemoteFX graphics per second.",
[]string{"session_name"},
nil,
),
OutputFramesPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_output_frames_total"),
"Number of frames sent to the client per second.",
[]string{"session_name"},
nil,
),
SourceFramesPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_source_frames_total"),
"Number of frames composed by the source (DWM) per second.",
[]string{"session_name"},
nil,
),
}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *RemoteFxCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectRemoteFXNetworkCount(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectRemoteFXGraphicsCounters(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
return nil
}
type perflibRemoteFxNetwork struct {
Name string
BaseTCPRTT float64 `perflib:"Base TCP RTT"`
BaseUDPRTT float64 `perflib:"Base UDP RTT"`
CurrentTCPBandwidth float64 `perflib:"Current TCP Bandwidth"`
CurrentTCPRTT float64 `perflib:"Current TCP RTT"`
CurrentUDPBandwidth float64 `perflib:"Current UDP Bandwidth"`
CurrentUDPRTT float64 `perflib:"Current UDP RTT"`
TotalReceivedBytes float64 `perflib:"Total Received Bytes"`
TotalSentBytes float64 `perflib:"Total Sent Bytes"`
UDPPacketsReceivedPersec float64 `perflib:"UDP Packets Received/sec"`
UDPPacketsSentPersec float64 `perflib:"UDP Packets Sent/sec"`
}
func (c *RemoteFxCollector) collectRemoteFXNetworkCount(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibRemoteFxNetwork, 0)
err := unmarshalObject(ctx.perfObjects["RemoteFX Network"], &dst, c.logger)
if err != nil {
return nil, err
}
for _, d := range dst {
// only collect metrics for remote named sessions
n := strings.ToLower(d.Name)
if n == "" || n == "services" || n == "console" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.BaseTCPRTT,
prometheus.GaugeValue,
milliSecToSec(d.BaseTCPRTT),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BaseUDPRTT,
prometheus.GaugeValue,
milliSecToSec(d.BaseUDPRTT),
d.Name,
)
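// The *1000/8 below converts the counter's kilobits per second into the bytes
// per second promised by the metric help text; the UDP bandwidth counter
// further down is scaled the same way.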
ch <- prometheus.MustNewConstMetric(
c.CurrentTCPBandwidth,
prometheus.GaugeValue,
(d.CurrentTCPBandwidth*1000)/8,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentTCPRTT,
prometheus.GaugeValue,
milliSecToSec(d.CurrentTCPRTT),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentUDPBandwidth,
prometheus.GaugeValue,
(d.CurrentUDPBandwidth*1000)/8,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentUDPRTT,
prometheus.GaugeValue,
milliSecToSec(d.CurrentUDPRTT),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalReceivedBytes,
prometheus.CounterValue,
d.TotalReceivedBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalSentBytes,
prometheus.CounterValue,
d.TotalSentBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.UDPPacketsReceivedPersec,
prometheus.CounterValue,
d.UDPPacketsReceivedPersec,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.UDPPacketsSentPersec,
prometheus.CounterValue,
d.UDPPacketsSentPersec,
d.Name,
)
}
return nil, nil
}
type perflibRemoteFxGraphics struct {
Name string
AverageEncodingTime float64 `perflib:"Average Encoding Time"`
FrameQuality float64 `perflib:"Frame Quality"`
FramesSkippedPerSecondInsufficientClientResources float64 `perflib:"Frames Skipped/Second - Insufficient Client Resources"`
FramesSkippedPerSecondInsufficientNetworkResources float64 `perflib:"Frames Skipped/Second - Insufficient Network Resources"`
FramesSkippedPerSecondInsufficientServerResources float64 `perflib:"Frames Skipped/Second - Insufficient Server Resources"`
GraphicsCompressionratio float64 `perflib:"Graphics Compression ratio"`
InputFramesPerSecond float64 `perflib:"Input Frames/Second"`
OutputFramesPerSecond float64 `perflib:"Output Frames/Second"`
SourceFramesPerSecond float64 `perflib:"Source Frames/Second"`
}
func (c *RemoteFxCollector) collectRemoteFXGraphicsCounters(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibRemoteFxGraphics, 0)
err := unmarshalObject(ctx.perfObjects["RemoteFX Graphics"], &dst, c.logger)
if err != nil {
return nil, err
}
for _, d := range dst {
// only collect metrics for remote named sessions
n := strings.ToLower(d.Name)
if n == "" || n == "services" || n == "console" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.AverageEncodingTime,
prometheus.GaugeValue,
milliSecToSec(d.AverageEncodingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FrameQuality,
prometheus.GaugeValue,
d.FrameQuality,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientClientResources,
d.Name,
"client",
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientNetworkResources,
d.Name,
"network",
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientServerResources,
d.Name,
"server",
)
ch <- prometheus.MustNewConstMetric(
c.GraphicsCompressionratio,
prometheus.GaugeValue,
d.GraphicsCompressionratio,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.InputFramesPerSecond,
prometheus.CounterValue,
d.InputFramesPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.OutputFramesPerSecond,
prometheus.CounterValue,
d.OutputFramesPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.SourceFramesPerSecond,
prometheus.CounterValue,
d.SourceFramesPerSecond,
d.Name,
)
}
return nil, nil
}

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkRemoteFXCollector(b *testing.B) {
benchmarkCollector(b, "remote_fx", newRemoteFx)
}

View File

@@ -1,401 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"errors"
"fmt"
"regexp"
"runtime"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
ole "github.com/go-ole/go-ole"
"github.com/go-ole/go-ole/oleutil"
"github.com/prometheus/client_golang/prometheus"
)
const (
FlagScheduledTaskOldExclude = "collector.scheduled_task.blacklist"
FlagScheduledTaskOldInclude = "collector.scheduled_task.whitelist"
FlagScheduledTaskExclude = "collector.scheduled_task.exclude"
FlagScheduledTaskInclude = "collector.scheduled_task.include"
)
var (
taskOldExclude *string
taskOldInclude *string
taskExclude *string
taskInclude *string
taskIncludeSet bool
taskExcludeSet bool
)
type ScheduledTaskCollector struct {
logger log.Logger
LastResult *prometheus.Desc
MissedRuns *prometheus.Desc
State *prometheus.Desc
taskIncludePattern *regexp.Regexp
taskExcludePattern *regexp.Regexp
}
// TaskState ...
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_state
type TaskState uint
type TaskResult uint
const (
TASK_STATE_UNKNOWN TaskState = iota
TASK_STATE_DISABLED
TASK_STATE_QUEUED
TASK_STATE_READY
TASK_STATE_RUNNING
TASK_RESULT_SUCCESS TaskResult = 0x0
)
// ScheduledTask ...
type ScheduledTask struct {
Name string
Path string
Enabled bool
State TaskState
MissedRunsCount float64
LastTaskResult TaskResult
}
type ScheduledTasks []ScheduledTask
// newScheduledTaskFlags ...
func newScheduledTaskFlags(app *kingpin.Application) {
taskInclude = app.Flag(
FlagScheduledTaskInclude,
"Regexp of tasks to include. Task path must both match include and not match exclude to be included.",
).Default(".+").PreAction(func(c *kingpin.ParseContext) error {
taskIncludeSet = true
return nil
}).String()
taskExclude = app.Flag(
FlagScheduledTaskExclude,
"Regexp of tasks to exclude. Task path must both match include and not match exclude to be included.",
).Default("").PreAction(func(c *kingpin.ParseContext) error {
taskExcludeSet = true
return nil
}).String()
taskOldInclude = app.Flag(
FlagScheduledTaskOldInclude,
"DEPRECATED: Use --collector.scheduled_task.include",
).Hidden().String()
taskOldExclude = app.Flag(
FlagScheduledTaskOldExclude,
"DEPRECATED: Use --collector.scheduled_task.exclude",
).Hidden().String()
}
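Both patterns are later wrapped in ^(?:...)$ (see newScheduledTask below), so a task is kept only when its full path matches the include pattern and does not match the exclude pattern. A standalone sketch of that filter, with hypothetical task paths:
package main
import (
	"fmt"
	"regexp"
)
func main() {
	// Anchoring forces a full-path match rather than a substring match.
	include := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", `/Microsoft/.+`))
	exclude := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", `.*Defrag.*`))
	for _, path := range []string{
		"/Microsoft/Windows/Defrag/ScheduledDefrag",
		"/Microsoft/Windows/Time Synchronization/SynchronizeTime",
		"/MyVendor/Backup",
	} {
		keep := include.MatchString(path) && !exclude.MatchString(path)
		fmt.Printf("%-55s keep=%v\n", path, keep)
	}
}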
// newScheduledTask ...
func newScheduledTask(logger log.Logger) (Collector, error) {
const subsystem = "scheduled_task"
logger = log.With(logger, "collector", subsystem)
if *taskOldExclude != "" {
if !taskExcludeSet {
_ = level.Warn(logger).Log("msg", "--collector.scheduled_task.blacklist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.exclude")
*taskExclude = *taskOldExclude
} else {
return nil, errors.New("--collector.scheduled_task.blacklist and --collector.scheduled_task.exclude are mutually exclusive")
}
}
if *taskOldInclude != "" {
if !taskIncludeSet {
_ = level.Warn(logger).Log("msg", "--collector.scheduled_task.whitelist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.include")
*taskInclude = *taskOldInclude
} else {
return nil, errors.New("--collector.scheduled_task.whitelist and --collector.scheduled_task.include are mutually exclusive")
}
}
runtime.LockOSThread()
defer runtime.UnlockOSThread()
err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
if err != nil {
code := err.(*ole.OleError).Code()
if code != ole.S_OK && code != S_FALSE {
return nil, err
}
}
defer ole.CoUninitialize()
return &ScheduledTaskCollector{
logger: logger,
LastResult: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "last_result"),
"The result that was returned the last time the registered task was run",
[]string{"task"},
nil,
),
MissedRuns: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "missed_runs"),
"The number of times the registered task missed a scheduled run",
[]string{"task"},
nil,
),
State: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "state"),
"The current state of a scheduled task",
[]string{"task", "state"},
nil,
),
taskIncludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *taskInclude)),
taskExcludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *taskExclude)),
}, nil
}
func (c *ScheduledTaskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
return err
}
return nil
}
var TASK_STATES = []string{"disabled", "queued", "ready", "running", "unknown"}
func (c *ScheduledTaskCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
scheduledTasks, err := getScheduledTasks()
if err != nil {
return nil, err
}
for _, task := range scheduledTasks {
if c.taskExcludePattern.MatchString(task.Path) ||
!c.taskIncludePattern.MatchString(task.Path) {
continue
}
lastResult := 0.0
if task.LastTaskResult == TASK_RESULT_SUCCESS {
lastResult = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.LastResult,
prometheus.GaugeValue,
lastResult,
task.Path,
)
ch <- prometheus.MustNewConstMetric(
c.MissedRuns,
prometheus.GaugeValue,
task.MissedRunsCount,
task.Path,
)
for _, state := range TASK_STATES {
var stateValue float64
if strings.ToLower(task.State.String()) == state {
stateValue = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.State,
prometheus.GaugeValue,
stateValue,
task.Path,
state,
)
}
}
return nil, nil
}
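The state loop above one-hot encodes TASK_STATES: every task gets one series per possible state, with value 1 only for its current state, so PromQL can select or aggregate on the state label without string parsing. A standalone illustration (metric and label names assumed from the descriptors built above):
package main
import "fmt"
func main() {
	states := []string{"disabled", "queued", "ready", "running", "unknown"}
	current := "ready" // hypothetical task state
	for _, s := range states {
		v := 0.0
		if s == current {
			v = 1.0
		}
		fmt.Printf("windows_scheduled_task_state{task=%q,state=%q} %g\n",
			"/MyVendor/Backup", s, v)
	}
}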
const SCHEDULED_TASK_PROGRAM_ID = "Schedule.Service.1"
// S_FALSE is returned by CoInitialize if it was already called on this thread.
const S_FALSE = 0x00000001
func getScheduledTasks() (scheduledTasks ScheduledTasks, err error) {
schedClassID, err := ole.ClassIDFrom(SCHEDULED_TASK_PROGRAM_ID)
if err != nil {
return scheduledTasks, err
}
taskSchedulerObj, err := ole.CreateInstance(schedClassID, nil)
if err != nil || taskSchedulerObj == nil {
return scheduledTasks, err
}
defer taskSchedulerObj.Release()
taskServiceObj := taskSchedulerObj.MustQueryInterface(ole.IID_IDispatch)
_, err = oleutil.CallMethod(taskServiceObj, "Connect")
if err != nil {
return scheduledTasks, err
}
defer taskServiceObj.Release()
res, err := oleutil.CallMethod(taskServiceObj, "GetFolder", `\`)
if err != nil {
return scheduledTasks, err
}
rootFolderObj := res.ToIDispatch()
defer rootFolderObj.Release()
err = fetchTasksRecursively(rootFolderObj, &scheduledTasks)
return scheduledTasks, err
}
func fetchTasksInFolder(folder *ole.IDispatch, scheduledTasks *ScheduledTasks) error {
res, err := oleutil.CallMethod(folder, "GetTasks", 1)
if err != nil {
return err
}
tasks := res.ToIDispatch()
defer tasks.Release()
err = oleutil.ForEach(tasks, func(v *ole.VARIANT) error {
task := v.ToIDispatch()
defer task.Release()
parsedTask, err := parseTask(task)
if err != nil {
return err
}
*scheduledTasks = append(*scheduledTasks, parsedTask)
return nil
})
return err
}
func fetchTasksRecursively(folder *ole.IDispatch, scheduledTasks *ScheduledTasks) error {
if err := fetchTasksInFolder(folder, scheduledTasks); err != nil {
return err
}
res, err := oleutil.CallMethod(folder, "GetFolders", 1)
if err != nil {
return err
}
subFolders := res.ToIDispatch()
defer subFolders.Release()
err = oleutil.ForEach(subFolders, func(v *ole.VARIANT) error {
subFolder := v.ToIDispatch()
defer subFolder.Release()
return fetchTasksRecursively(subFolder, scheduledTasks)
})
return err
}
func parseTask(task *ole.IDispatch) (scheduledTask ScheduledTask, err error) {
taskNameVar, err := oleutil.GetProperty(task, "Name")
if err != nil {
return scheduledTask, err
}
defer func() {
if tempErr := taskNameVar.Clear(); tempErr != nil {
err = tempErr
}
}()
taskPathVar, err := oleutil.GetProperty(task, "Path")
if err != nil {
return scheduledTask, err
}
defer func() {
if tempErr := taskPathVar.Clear(); tempErr != nil {
err = tempErr
}
}()
taskEnabledVar, err := oleutil.GetProperty(task, "Enabled")
if err != nil {
return scheduledTask, err
}
defer func() {
if tempErr := taskEnabledVar.Clear(); tempErr != nil {
err = tempErr
}
}()
taskStateVar, err := oleutil.GetProperty(task, "State")
if err != nil {
return scheduledTask, err
}
defer func() {
if tempErr := taskStateVar.Clear(); tempErr != nil {
err = tempErr
}
}()
taskNumberOfMissedRunsVar, err := oleutil.GetProperty(task, "NumberOfMissedRuns")
if err != nil {
return scheduledTask, err
}
defer func() {
if tempErr := taskNumberOfMissedRunsVar.Clear(); tempErr != nil {
err = tempErr
}
}()
taskLastTaskResultVar, err := oleutil.GetProperty(task, "LastTaskResult")
if err != nil {
return scheduledTask, err
}
defer func() {
if tempErr := taskLastTaskResultVar.Clear(); tempErr != nil {
err = tempErr
}
}()
scheduledTask.Name = taskNameVar.ToString()
scheduledTask.Path = strings.ReplaceAll(taskPathVar.ToString(), "\\", "/")
scheduledTask.Enabled = taskEnabledVar.Value().(bool)
scheduledTask.State = TaskState(taskStateVar.Val)
scheduledTask.MissedRunsCount = float64(taskNumberOfMissedRunsVar.Val)
scheduledTask.LastTaskResult = TaskResult(taskLastTaskResultVar.Val)
return scheduledTask, err
}
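parseTask repeats the same fetch-then-defer-Clear block six times. A hypothetical helper, not present in the source, that returns the cleanup alongside the VARIANT would collapse each block to three statements:
// getTaskProperty pairs a COM property fetch with its cleanup. Call sites
// become:
//
//	nameVar, clearName, err := getTaskProperty(task, "Name")
//	if err != nil {
//		return scheduledTask, err
//	}
//	defer func() {
//		if cerr := clearName(); cerr != nil {
//			err = cerr
//		}
//	}()
func getTaskProperty(task *ole.IDispatch, name string) (*ole.VARIANT, func() error, error) {
	v, err := oleutil.GetProperty(task, name)
	if err != nil {
		return nil, nil, err
	}
	return v, v.Clear, nil
}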
func (t TaskState) String() string {
switch t {
case TASK_STATE_UNKNOWN:
return "Unknown"
case TASK_STATE_DISABLED:
return "Disabled"
case TASK_STATE_QUEUED:
return "Queued"
case TASK_STATE_READY:
return "Ready"
case TASK_STATE_RUNNING:
return "Running"
default:
return ""
}
}
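A hypothetical in-package usage sketch, not part of the source: it performs the same COM initialisation that newScheduledTask does before touching the Task Scheduler, then walks the task tree via getScheduledTasks defined above. Assumes this file's imports plus "fmt".
func dumpScheduledTasks() error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		if code := err.(*ole.OleError).Code(); code != ole.S_OK && code != S_FALSE {
			return err
		}
	}
	defer ole.CoUninitialize()
	tasks, err := getScheduledTasks()
	if err != nil {
		return err
	}
	for _, task := range tasks {
		fmt.Printf("%s state=%s missed=%g\n", task.Path, task.State, task.MissedRunsCount)
	}
	return nil
}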

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkScheduledTaskCollector(b *testing.B) {
benchmarkCollector(b, "scheduled_task", newScheduledTask)
}

View File

@@ -1,77 +1,45 @@
//go:build windows
// +build windows
package collector
import (
"fmt"
"strings"
"syscall"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc/mgr"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
FlagServiceWhereClause = "collector.service.services-where"
FlagServiceUseAPI = "collector.service.use-api"
)
func init() {
registerCollector("service", NewserviceCollector)
}
var (
serviceWhereClause *string
useAPI *bool
serviceWhereClause = kingpin.Flag(
"collector.service.services-where",
"WQL 'where' clause to use in WMI metrics query. Limits the response to the services you specify and reduces the size of the response.",
).Default("").String()
)
// A serviceCollector is a Prometheus collector for WMI Win32_Service metrics
type serviceCollector struct {
logger log.Logger
Information *prometheus.Desc
State *prometheus.Desc
StartMode *prometheus.Desc
Status *prometheus.Desc
State *prometheus.Desc
StartMode *prometheus.Desc
Status *prometheus.Desc
queryWhereClause string
}
// newServiceCollectorFlags ...
func newServiceCollectorFlags(app *kingpin.Application) {
serviceWhereClause = app.Flag(
FlagServiceWhereClause,
"WQL 'where' clause to use in WMI metrics query. Limits the response to the services you specify and reduces the size of the response.",
).Default("").String()
useAPI = app.Flag(
FlagServiceUseAPI,
"Use API calls to collect service data instead of WMI. Flag 'collector.service.services-where' won't be effective.",
).Default("false").Bool()
}
// newserviceCollector ...
func newserviceCollector(logger log.Logger) (Collector, error) {
// NewserviceCollector ...
func NewserviceCollector() (Collector, error) {
const subsystem = "service"
logger = log.With(logger, "collector", subsystem)
if *serviceWhereClause == "" {
_ = level.Warn(logger).Log("msg", "No where-clause specified for service collector. This will generate a very large number of metrics!")
}
if *useAPI {
_ = level.Warn(logger).Log("msg", "API collection is enabled.")
log.Warn("No where-clause specified for service collector. This will generate a very large number of metrics!")
}
return &serviceCollector{
logger: logger,
Information: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "info"),
"A metric with a constant '1' value labeled with service information",
[]string{"name", "display_name", "process_id", "run_as"},
nil,
),
State: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "state"),
"The state of the service (State)",
@@ -97,16 +65,9 @@ func newserviceCollector(logger log.Logger) (Collector, error) {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if *useAPI {
if err := c.collectAPI(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting API service metrics:", "err", err)
return err
}
} else {
if err := c.collectWMI(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting WMI service metrics:", "err", err)
return err
}
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting service metrics:", desc, err)
return err
}
return nil
}
@@ -114,13 +75,10 @@ func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
// Win32_Service docs:
// - https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx
type Win32_Service struct {
DisplayName string
Name string
ProcessId uint32
State string
Status string
StartMode string
StartName *string
Name string
State string
Status string
StartMode string
}
var (
@@ -134,15 +92,6 @@ var (
"paused",
"unknown",
}
apiStateValues = map[uint]string{
windows.SERVICE_CONTINUE_PENDING: "continue pending",
windows.SERVICE_PAUSE_PENDING: "pause pending",
windows.SERVICE_PAUSED: "paused",
windows.SERVICE_RUNNING: "running",
windows.SERVICE_START_PENDING: "start pending",
windows.SERVICE_STOP_PENDING: "stop pending",
windows.SERVICE_STOPPED: "stopped",
}
allStartModes = []string{
"boot",
"system",
@@ -150,13 +99,6 @@ var (
"manual",
"disabled",
}
apiStartModeValues = map[uint32]string{
windows.SERVICE_AUTO_START: "auto",
windows.SERVICE_BOOT_START: "boot",
windows.SERVICE_DEMAND_START: "manual",
windows.SERVICE_DISABLED: "disabled",
windows.SERVICE_SYSTEM_START: "system",
}
allStatuses = []string{
"ok",
"error",
@@ -173,29 +115,14 @@ var (
}
)
func (c *serviceCollector) collectWMI(ch chan<- prometheus.Metric) error {
func (c *serviceCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_Service
q := queryAllWhere(&dst, c.queryWhereClause, c.logger)
q := queryAllWhere(&dst, c.queryWhereClause)
if err := wmi.Query(q, &dst); err != nil {
return err
return nil, err
}
for _, service := range dst {
pid := fmt.Sprintf("%d", uint64(service.ProcessId))
runAs := ""
if service.StartName != nil {
runAs = *service.StartName
}
ch <- prometheus.MustNewConstMetric(
c.Information,
prometheus.GaugeValue,
1.0,
strings.ToLower(service.Name),
service.DisplayName,
pid,
runAs,
)
for _, state := range allStates {
isCurrentState := 0.0
if state == strings.ToLower(service.State) {
@@ -238,95 +165,5 @@ func (c *serviceCollector) collectWMI(ch chan<- prometheus.Metric) error {
)
}
}
return nil
}
func (c *serviceCollector) collectAPI(ch chan<- prometheus.Metric) error {
svcmgrConnection, err := mgr.Connect()
if err != nil {
return err
}
defer svcmgrConnection.Disconnect() //nolint:errcheck
// List All Services from the Services Manager.
serviceList, err := svcmgrConnection.ListServices()
if err != nil {
return err
}
// Iterate through the Services List.
for _, service := range serviceList {
// Get UTF16 service name.
serviceName, err := syscall.UTF16PtrFromString(service)
if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Service %s get name error: %#v", service, err))
continue
}
// Open connection for service handler.
serviceHandle, err := windows.OpenService(svcmgrConnection.Handle, serviceName, windows.GENERIC_READ)
if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Open service %s error: %#v", service, err))
continue
}
// Create handle for each service.
serviceManager := &mgr.Service{Name: service, Handle: serviceHandle}
defer serviceManager.Close()
// Get Service Configuration.
serviceConfig, err := serviceManager.Config()
if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Get ervice %s config error: %#v", service, err))
continue
}
// Get Service Current Status.
serviceStatus, err := serviceManager.Query()
if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Get service %s status error: %#v", service, err))
continue
}
pid := fmt.Sprintf("%d", uint64(serviceStatus.ProcessId))
ch <- prometheus.MustNewConstMetric(
c.Information,
prometheus.GaugeValue,
1.0,
strings.ToLower(service),
serviceConfig.DisplayName,
pid,
serviceConfig.ServiceStartName,
)
for _, state := range apiStateValues {
isCurrentState := 0.0
if state == apiStateValues[uint(serviceStatus.State)] {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.State,
prometheus.GaugeValue,
isCurrentState,
strings.ToLower(service),
state,
)
}
for _, startMode := range apiStartModeValues {
isCurrentStartMode := 0.0
if startMode == apiStartModeValues[serviceConfig.StartType] {
isCurrentStartMode = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.StartMode,
prometheus.GaugeValue,
isCurrentStartMode,
strings.ToLower(service),
startMode,
)
}
}
return nil
return nil, nil
}
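For reference, the WMI side of this collector boils down to building a WQL query from the struct's fields plus an optional WHERE clause. A minimal standalone sketch using the yusufpapurcu/wmi fork imported above, with a trimmed field set and a hypothetical filter:
package main
import (
	"fmt"
	"github.com/yusufpapurcu/wmi"
)
// Trimmed version of the collector's Win32_Service struct; CreateQuery
// derives the WMI class name from the struct type name.
type Win32_Service struct {
	Name      string
	State     string
	StartMode string
	Status    string
}
func main() {
	var dst []Win32_Service
	q := wmi.CreateQuery(&dst, "WHERE Name='wuauserv'") // hypothetical services-where clause
	if err := wmi.Query(q, &dst); err != nil {
		panic(err)
	}
	for _, s := range dst {
		fmt.Printf("%s: state=%s start=%s status=%s\n", s.Name, s.State, s.StartMode, s.Status)
	}
}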

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkServiceCollector(b *testing.B) {
benchmarkCollector(b, "service", newserviceCollector)
}

View File

@@ -1,756 +0,0 @@
//go:build windows
// +build windows
package collector
import (
"errors"
"fmt"
"regexp"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
const (
FlagSmtpServerOldExclude = "collector.smtp.server-blacklist"
FlagSmtpServerOldInclude = "collector.smtp.server-whitelist"
FlagSmtpServerExclude = "collector.smtp.server-exclude"
FlagSmtpServerInclude = "collector.smtp.server-include"
)
var (
serverOldInclude *string
serverOldExclude *string
serverInclude *string
serverExclude *string
serverIncludeSet bool
serverExcludeSet bool
)
type SMTPCollector struct {
logger log.Logger
BadmailedMessagesBadPickupFileTotal *prometheus.Desc
BadmailedMessagesGeneralFailureTotal *prometheus.Desc
BadmailedMessagesHopCountExceededTotal *prometheus.Desc
BadmailedMessagesNDROfDSNTotal *prometheus.Desc
BadmailedMessagesNoRecipientsTotal *prometheus.Desc
BadmailedMessagesTriggeredViaEventTotal *prometheus.Desc
BytesSentTotal *prometheus.Desc
BytesReceivedTotal *prometheus.Desc
CategorizerQueueLength *prometheus.Desc
ConnectionErrorsTotal *prometheus.Desc
CurrentMessagesInLocalDelivery *prometheus.Desc
DirectoryDropsTotal *prometheus.Desc
DNSQueriesTotal *prometheus.Desc
DSNFailuresTotal *prometheus.Desc
ETRNMessagesTotal *prometheus.Desc
InboundConnectionsCurrent *prometheus.Desc
InboundConnectionsTotal *prometheus.Desc
LocalQueueLength *prometheus.Desc
LocalRetryQueueLength *prometheus.Desc
MailFilesOpen *prometheus.Desc
MessageBytesReceivedTotal *prometheus.Desc
MessageBytesSentTotal *prometheus.Desc
MessageDeliveryRetriesTotal *prometheus.Desc
MessageSendRetriesTotal *prometheus.Desc
MessagesCurrentlyUndeliverable *prometheus.Desc
MessagesDeliveredTotal *prometheus.Desc
MessagesPendingRouting *prometheus.Desc
MessagesReceivedTotal *prometheus.Desc
MessagesRefusedForAddressObjectsTotal *prometheus.Desc
MessagesRefusedForMailObjectsTotal *prometheus.Desc
MessagesRefusedForSizeTotal *prometheus.Desc
MessagesSentTotal *prometheus.Desc
MessagesSubmittedTotal *prometheus.Desc
NDRsGeneratedTotal *prometheus.Desc
OutboundConnectionsCurrent *prometheus.Desc
OutboundConnectionsRefusedTotal *prometheus.Desc
OutboundConnectionsTotal *prometheus.Desc
QueueFilesOpen *prometheus.Desc
PickupDirectoryMessagesRetrievedTotal *prometheus.Desc
RemoteQueueLength *prometheus.Desc
RemoteRetryQueueLength *prometheus.Desc
RoutingTableLookupsTotal *prometheus.Desc
serverIncludePattern *regexp.Regexp
serverExcludePattern *regexp.Regexp
}
func newSMTPCollectorFlags(app *kingpin.Application) {
serverInclude = app.Flag(
FlagSmtpServerInclude,
"Regexp of virtual servers to include. Server name must both match include and not match exclude to be included.",
).Default(".+").PreAction(func(c *kingpin.ParseContext) error {
serverIncludeSet = true
return nil
}).String()
serverExclude = app.Flag(
FlagSmtpServerExclude,
"Regexp of virtual servers to exclude. Server name must both match include and not match exclude to be included.",
).Default("").PreAction(func(c *kingpin.ParseContext) error {
serverExcludeSet = true
return nil
}).String()
serverOldInclude = app.Flag(
FlagSmtpServerOldInclude,
"DEPRECATED: Use --collector.smtp.server-include",
).Hidden().String()
serverOldExclude = app.Flag(
FlagSmtpServerOldExclude,
"DEPRECATED: Use --collector.smtp.server-exclude",
).Hidden().String()
}
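The PreAction callbacks above exist only to record whether a flag was set explicitly, which is what lets newSMTPCollector distinguish "user passed --collector.smtp.server-exclude" from "left at default" when migrating the deprecated names. A self-contained sketch of that pattern with hypothetical flag names:
package main
import (
	"fmt"
	"os"
	"github.com/alecthomas/kingpin/v2"
)
func main() {
	app := kingpin.New("demo", "deprecated-flag migration sketch")
	var includeSet bool
	include := app.Flag("include", "Regexp of items to include.").
		Default(".+").
		PreAction(func(*kingpin.ParseContext) error {
			includeSet = true
			return nil
		}).String()
	oldInclude := app.Flag("whitelist", "DEPRECATED: use --include").Hidden().String()
	kingpin.MustParse(app.Parse(os.Args[1:]))
	if *oldInclude != "" {
		if includeSet {
			fmt.Fprintln(os.Stderr, "--whitelist and --include are mutually exclusive")
			os.Exit(1)
		}
		*include = *oldInclude // migrate the deprecated value
	}
	fmt.Println("effective include:", *include)
}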
func newSMTPCollector(logger log.Logger) (Collector, error) {
const subsystem = "smtp"
logger = log.With(logger, "collector", subsystem)
_ = level.Info(logger).Log("msg", "smtp collector is in an experimental state! Metrics for this collector have not been tested.")
if *serverOldExclude != "" {
if !serverExcludeSet {
_ = level.Warn(logger).Log("msg", "--collector.smtp.server-blacklist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-exclude")
*serverExclude = *serverOldExclude
} else {
return nil, errors.New("--collector.smtp.server-blacklist and --collector.smtp.server-exclude are mutually exclusive")
}
}
if *serverOldInclude != "" {
if !serverIncludeSet {
_ = level.Warn(logger).Log("msg", "--collector.smtp.server-whitelist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-include")
*serverInclude = *serverOldInclude
} else {
return nil, errors.New("--collector.smtp.server-whitelist and --collector.smtp.server-include are mutually exclusive")
}
}
return &SMTPCollector{
logger: logger,
BadmailedMessagesBadPickupFileTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_bad_pickup_file_total"),
"Total number of malformed pickup messages sent to badmail",
[]string{"site"},
nil,
),
BadmailedMessagesGeneralFailureTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_general_failure_total"),
"Total number of messages sent to badmail for reasons not associated with a specific counter",
[]string{"site"},
nil,
),
BadmailedMessagesHopCountExceededTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_hop_count_exceeded_total"),
"Total number of messages sent to badmail because they had exceeded the maximum hop count",
[]string{"site"},
nil,
),
BadmailedMessagesNDROfDSNTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_ndr_of_dns_total"),
"Total number of Delivery Status Notifications sent to badmail because they could not be delivered",
[]string{"site"},
nil,
),
BadmailedMessagesNoRecipientsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_no_recipients_total"),
"Total number of messages sent to badmail because they had no recipients",
[]string{"site"},
nil,
),
BadmailedMessagesTriggeredViaEventTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_triggered_via_event_total"),
"Total number of messages sent to badmail at the request of a server event sink",
[]string{"site"},
nil,
),
BytesSentTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "bytes_sent_total"),
"Total number of bytes sent",
[]string{"site"},
nil,
),
BytesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "bytes_received_total"),
"Total number of bytes received",
[]string{"site"},
nil,
),
CategorizerQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "categorizer_queue_length"),
"Number of messages in the categorizer queue",
[]string{"site"},
nil,
),
ConnectionErrorsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_errors_total"),
"Total number of connection errors",
[]string{"site"},
nil,
),
CurrentMessagesInLocalDelivery: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "current_messages_in_local_delivery"),
"Number of messages that are currently being processed by a server event sink for local delivery",
[]string{"site"},
nil,
),
DirectoryDropsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "directory_drops_total"),
"Total number of messages placed in a drop directory",
[]string{"site"},
nil,
),
DSNFailuresTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "dsn_failures_total"),
"Total number of failed DSN generation attempts",
[]string{"site"},
nil,
),
DNSQueriesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "dns_queries_total"),
"Total number of DNS lookups",
[]string{"site"},
nil,
),
ETRNMessagesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "etrn_messages_total"),
"Total number of ETRN messages received by the server",
[]string{"site"},
nil,
),
InboundConnectionsCurrent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "inbound_connections_current"),
"Total number of connections currently inbound",
[]string{"site"},
nil,
),
InboundConnectionsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "inbound_connections_total"),
"Total number of inbound connections received",
[]string{"site"},
nil,
),
LocalQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "local_queue_length"),
"Number of messages in the local queue",
[]string{"site"},
nil,
),
LocalRetryQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "local_retry_queue_length"),
"Number of messages in the local retry queue",
[]string{"site"},
nil,
),
MailFilesOpen: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mail_files_open"),
"Number of handles to open mail files",
[]string{"site"},
nil,
),
MessageBytesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "message_bytes_received_total"),
"Total number of bytes received in messages",
[]string{"site"},
nil,
),
MessageBytesSentTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "message_bytes_sent_total"),
"Total number of bytes sent in messages",
[]string{"site"},
nil,
),
MessageDeliveryRetriesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "message_delivery_retries_total"),
"Total number of local deliveries that were retried",
[]string{"site"},
nil,
),
MessageSendRetriesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "message_send_retries_total"),
"Total number of outbound message sends that were retried",
[]string{"site"},
nil,
),
MessagesCurrentlyUndeliverable: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "messages_currently_undeliverable"),
"Number of messages that have been reported as currently undeliverable by routing",
[]string{"site"},
nil,
),
MessagesDeliveredTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "messages_delivered_total"),
"Total number of messages delivered to local mailboxes",
[]string{"site"},
nil,
),
MessagesPendingRouting: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "messages_pending_routing"),
"Number of messages that have been categorized but not routed",
[]string{"site"},
nil,
),
MessagesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "messages_received_total"),
"Total number of inbound messages accepted",
[]string{"site"},
nil,
),
MessagesRefusedForAddressObjectsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "messages_refused_for_address_objects_total"),
"Total number of messages refused due to no address objects",
[]string{"site"},
nil,
),
MessagesRefusedForMailObjectsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "messages_refused_for_mail_objects_total"),
"Total number of messages refused due to no mail objects",
[]string{"site"},
nil,
),
MessagesRefusedForSizeTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "messages_refused_for_size_total"),
"Total number of messages rejected because they were too big",
[]string{"site"},
nil,
),
MessagesSentTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "messages_sent_total"),
"Total number of outbound messages sent",
[]string{"site"},
nil,
),
MessagesSubmittedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "messages_submitted_total"),
"Total number of messages submitted to queuing for delivery",
[]string{"site"},
nil,
),
NDRsGeneratedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "ndrs_generated_total"),
"Total number of non-delivery reports that have been generated",
[]string{"site"},
nil,
),
OutboundConnectionsCurrent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "outbound_connections_current"),
"Number of connections currently outbound",
[]string{"site"},
nil,
),
OutboundConnectionsRefusedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "outbound_connections_refused_total"),
"Total number of connection attempts refused by remote sites",
[]string{"site"},
nil,
),
OutboundConnectionsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "outbound_connections_total"),
"Total number of outbound connections attempted",
[]string{"site"},
nil,
),
PickupDirectoryMessagesRetrievedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pickup_directory_messages_retrieved_total"),
"Total number of messages retrieved from the mail pick-up directory",
[]string{"site"},
nil,
),
QueueFilesOpen: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "queue_files_open"),
"Number of handles to open queue files",
[]string{"site"},
nil,
),
RemoteQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "remote_queue_length"),
"Number of messages in the remote queue",
[]string{"site"},
nil,
),
RemoteRetryQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "remote_retry_queue_length"),
"Number of messages in the retry queue for remote delivery",
[]string{"site"},
nil,
),
RoutingTableLookupsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "routing_table_lookups_total"),
"Total number of routing table lookups",
[]string{"site"},
nil,
),
serverIncludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *serverInclude)),
serverExcludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *serverExclude)),
}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *SMTPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting smtp metrics", "desc", desc, "err", err)
return err
}
return nil
}
// Perflib: "SMTP Server"
type PerflibSMTPServer struct {
Name string
BadmailedMessagesBadPickupFileTotal float64 `perflib:"Badmailed Messages (Bad Pickup File)"`
BadmailedMessagesGeneralFailureTotal float64 `perflib:"Badmailed Messages (General Failure)"`
BadmailedMessagesHopCountExceededTotal float64 `perflib:"Badmailed Messages (Hop Count Exceeded)"`
BadmailedMessagesNDROfDSNTotal float64 `perflib:"Badmailed Messages (NDR of DSN)"`
BadmailedMessagesNoRecipientsTotal float64 `perflib:"Badmailed Messages (No Recipients)"`
BadmailedMessagesTriggeredViaEventTotal float64 `perflib:"Badmailed Messages (Triggered via Event)"`
BytesSentTotal float64 `perflib:"Bytes Sent Total"`
BytesReceivedTotal float64 `perflib:"Bytes Received Total"`
CategorizerQueueLength float64 `perflib:"Categorizer Queue Length"`
ConnectionErrorsTotal float64 `perflib:"Total Connection Errors"`
CurrentMessagesInLocalDelivery float64 `perflib:"Current Messages in Local Delivery"`
DirectoryDropsTotal float64 `perflib:"Directory Drops Total"`
DNSQueriesTotal float64 `perflib:"DNS Queries Total"`
DSNFailuresTotal float64 `perflib:"Total DSN Failures"`
ETRNMessagesTotal float64 `perflib:"ETRN Messages Total"`
InboundConnectionsCurrent float64 `perflib:"Inbound Connections Current"`
InboundConnectionsTotal float64 `perflib:"Inbound Connections Total"`
LocalQueueLength float64 `perflib:"Local Queue Length"`
LocalRetryQueueLength float64 `perflib:"Local Retry Queue Length"`
MailFilesOpen float64 `perflib:"Number of MailFiles Open"`
MessageBytesReceivedTotal float64 `perflib:"Message Bytes Received Total"`
MessageBytesSentTotal float64 `perflib:"Message Bytes Sent Total"`
MessageDeliveryRetriesTotal float64 `perflib:"Message Delivery Retries"`
MessageSendRetriesTotal float64 `perflib:"Message Send Retries"`
MessagesCurrentlyUndeliverable float64 `perflib:"Messages Currently Undeliverable"`
MessagesDeliveredTotal float64 `perflib:"Messages Delivered Total"`
MessagesPendingRouting float64 `perflib:"Messages Pending Routing"`
MessagesReceivedTotal float64 `perflib:"Messages Received Total"`
MessagesRefusedForAddressObjectsTotal float64 `perflib:"Messages Refused for Address Objects"`
MessagesRefusedForMailObjectsTotal float64 `perflib:"Messages Refused for Mail Objects"`
MessagesRefusedForSizeTotal float64 `perflib:"Messages Refused for Size"`
MessagesSentTotal float64 `perflib:"Messages Sent Total"`
MessagesSubmittedTotal float64 `perflib:"Total messages submitted"`
NDRsGeneratedTotal float64 `perflib:"NDRs Generated"`
OutboundConnectionsCurrent float64 `perflib:"Outbound Connections Current"`
OutboundConnectionsRefusedTotal float64 `perflib:"Outbound Connections Refused"`
OutboundConnectionsTotal float64 `perflib:"Outbound Connections Total"`
QueueFilesOpen float64 `perflib:"Number of QueueFiles Open"`
PickupDirectoryMessagesRetrievedTotal float64 `perflib:"Pickup Directory Messages Retrieved Total"`
RemoteQueueLength float64 `perflib:"Remote Queue Length"`
RemoteRetryQueueLength float64 `perflib:"Remote Retry Queue Length"`
RoutingTableLookupsTotal float64 `perflib:"Routing Table Lookups Total"`
}
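The perflib struct tags above are how the exporter's unmarshalObject maps Windows counter names onto struct fields. The real helper is more involved (instance handling, counter types); the following standalone sketch, with made-up counter values, only illustrates the tag lookup itself:
package main
import (
	"fmt"
	"reflect"
)
type smtpSample struct {
	Name             string
	BytesSentTotal   float64 `perflib:"Bytes Sent Total"`
	LocalQueueLength float64 `perflib:"Local Queue Length"`
}
func main() {
	counters := map[string]float64{
		"Bytes Sent Total":   1.2e6,
		"Local Queue Length": 3,
	}
	var s smtpSample
	v := reflect.ValueOf(&s).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("perflib")
		if val, ok := counters[tag]; ok {
			v.Field(i).SetFloat(val)
		}
	}
	fmt.Printf("%+v\n", s)
}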
func (c *SMTPCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []PerflibSMTPServer
if err := unmarshalObject(ctx.perfObjects["SMTP Server"], &dst, c.logger); err != nil {
return nil, err
}
for _, server := range dst {
if server.Name == "_Total" ||
c.serverExcludePattern.MatchString(server.Name) ||
!c.serverIncludePattern.MatchString(server.Name) {
continue
}
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesBadPickupFileTotal,
prometheus.CounterValue,
server.BadmailedMessagesBadPickupFileTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesGeneralFailureTotal,
prometheus.CounterValue,
server.BadmailedMessagesGeneralFailureTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesHopCountExceededTotal,
prometheus.CounterValue,
server.BadmailedMessagesHopCountExceededTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesNDROfDSNTotal,
prometheus.CounterValue,
server.BadmailedMessagesNDROfDSNTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesNoRecipientsTotal,
prometheus.CounterValue,
server.BadmailedMessagesNoRecipientsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesTriggeredViaEventTotal,
prometheus.CounterValue,
server.BadmailedMessagesTriggeredViaEventTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BytesSentTotal,
prometheus.CounterValue,
server.BytesSentTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BytesReceivedTotal,
prometheus.CounterValue,
server.BytesReceivedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CategorizerQueueLength,
prometheus.GaugeValue,
server.CategorizerQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionErrorsTotal,
prometheus.CounterValue,
server.ConnectionErrorsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentMessagesInLocalDelivery,
prometheus.GaugeValue,
server.CurrentMessagesInLocalDelivery,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DirectoryDropsTotal,
prometheus.CounterValue,
server.DirectoryDropsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DSNFailuresTotal,
prometheus.CounterValue,
server.DSNFailuresTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DNSQueriesTotal,
prometheus.CounterValue,
server.DNSQueriesTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ETRNMessagesTotal,
prometheus.CounterValue,
server.ETRNMessagesTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.InboundConnectionsTotal,
prometheus.CounterValue,
server.InboundConnectionsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.InboundConnectionsCurrent,
prometheus.GaugeValue,
server.InboundConnectionsCurrent,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.LocalQueueLength,
prometheus.GaugeValue,
server.LocalQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.LocalRetryQueueLength,
prometheus.GaugeValue,
server.LocalRetryQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MailFilesOpen,
prometheus.GaugeValue,
server.MailFilesOpen,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessageBytesReceivedTotal,
prometheus.CounterValue,
server.MessageBytesReceivedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessageBytesSentTotal,
prometheus.CounterValue,
server.MessageBytesSentTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessageDeliveryRetriesTotal,
prometheus.CounterValue,
server.MessageDeliveryRetriesTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessageSendRetriesTotal,
prometheus.CounterValue,
server.MessageSendRetriesTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesCurrentlyUndeliverable,
prometheus.GaugeValue,
server.MessagesCurrentlyUndeliverable,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesDeliveredTotal,
prometheus.CounterValue,
server.MessagesDeliveredTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesPendingRouting,
prometheus.GaugeValue,
server.MessagesPendingRouting,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesReceivedTotal,
prometheus.CounterValue,
server.MessagesReceivedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesRefusedForAddressObjectsTotal,
prometheus.CounterValue,
server.MessagesRefusedForAddressObjectsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesRefusedForMailObjectsTotal,
prometheus.CounterValue,
server.MessagesRefusedForMailObjectsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesRefusedForSizeTotal,
prometheus.CounterValue,
server.MessagesRefusedForSizeTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesSentTotal,
prometheus.CounterValue,
server.MessagesSentTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesSubmittedTotal,
prometheus.CounterValue,
server.MessagesSubmittedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NDRsGeneratedTotal,
prometheus.CounterValue,
server.NDRsGeneratedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.OutboundConnectionsCurrent,
prometheus.GaugeValue,
server.OutboundConnectionsCurrent,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.OutboundConnectionsRefusedTotal,
prometheus.CounterValue,
server.OutboundConnectionsRefusedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.OutboundConnectionsTotal,
prometheus.CounterValue,
server.OutboundConnectionsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.QueueFilesOpen,
prometheus.GaugeValue,
server.QueueFilesOpen,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PickupDirectoryMessagesRetrievedTotal,
prometheus.CounterValue,
server.PickupDirectoryMessagesRetrievedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RemoteQueueLength,
prometheus.GaugeValue,
server.RemoteQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RemoteRetryQueueLength,
prometheus.GaugeValue,
server.RemoteRetryQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RoutingTableLookupsTotal,
prometheus.CounterValue,
server.RoutingTableLookupsTotal,
server.Name,
)
}
return nil, nil
}

View File

@@ -1,9 +0,0 @@
package collector
import (
"testing"
)
func BenchmarkSmtpCollector(b *testing.B) {
benchmarkCollector(b, "smtp", newSMTPCollector)
}

View File

@@ -1,18 +1,18 @@
//go:build windows
// +build windows
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("system", NewSystemCollector, "System")
}
// A SystemCollector is a Prometheus collector for WMI metrics
type SystemCollector struct {
logger log.Logger
ContextSwitchesTotal *prometheus.Desc
ExceptionDispatchesTotal *prometheus.Desc
ProcessorQueueLength *prometheus.Desc
@@ -21,12 +21,11 @@ type SystemCollector struct {
Threads *prometheus.Desc
}
// newSystemCollector ...
func newSystemCollector(logger log.Logger) (Collector, error) {
// NewSystemCollector ...
func NewSystemCollector() (Collector, error) {
const subsystem = "system"
return &SystemCollector{
logger: log.With(logger, "collector", subsystem),
ContextSwitchesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "context_switches_total"),
"Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)",
@@ -70,7 +69,7 @@ func newSystemCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting system metrics", "desc", desc, "err", err)
log.Error("failed collecting system metrics:", desc, err)
return err
}
return nil
@@ -89,7 +88,7 @@ type system struct {
func (c *SystemCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []system
if err := unmarshalObject(ctx.perfObjects["System"], &dst, c.logger); err != nil {
if err := unmarshalObject(ctx.perfObjects["System"], &dst); err != nil {
return nil, err
}

Some files were not shown because too many files have changed in this diff.