Compare commits

...

57 Commits

Author SHA1 Message Date
Jan-Otto Kröpke
a93bbe4ac2 ci: disable hostimage image (#1539) 2024-07-29 09:28:44 +02:00
Jan-Otto Kröpke
33efbdfbcb try fix push by bump dependencies (#1537) 2024-07-24 16:24:01 +02:00
Jan-Otto Kröpke
8db705e67a fix push (#1536) 2024-07-24 16:01:22 +02:00
Jan-Otto Kröpke
7044b556c2 Add terminal service session info (#1525) 2024-07-24 11:18:08 +02:00
Jan-Otto Kröpke
fa8d28c181 license collector (#1524) 2024-07-23 13:02:25 +02:00
Jan-Otto Kröpke
64c8423e61 printer collector: Use ENUM pattern for printer status (#1500) 2024-07-23 13:02:09 +02:00
Jan-Otto Kröpke
e2e1141973 logical disk: Fix metrics for non drive letter disks (#1498) 2024-07-23 13:01:30 +02:00
Jan-Otto Kröpke
31bb6d03ee fix default value of collectors.dfsr.sources-enabled (#1506) 2024-07-22 15:21:19 +02:00
Jan-Otto Kröpke
caee5a05fe Add hostprocess image builds (#1507) 2024-07-22 15:21:05 +02:00
FRFlo
578b16b448 fix(README.md): Corrected image links (#1533) 2024-07-22 15:20:48 +02:00
dependabot[bot]
1da96a4f3c chore(deps): bump github.com/Microsoft/hcsshim from 0.12.4 to 0.12.5 (#1535)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-22 14:10:57 +02:00
jacbo0112
fddb92a9d5 Update collector.diskdrive.md (#1502) 2024-07-20 20:09:36 +02:00
jacbo0112
2cda7b3f4d Update README.md (#1516) 2024-07-20 18:25:14 +02:00
dependabot[bot]
b44b0413bb chore(deps): bump golang.org/x/sys from 0.21.0 to 0.22.0 (#1531)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-08 15:10:23 +02:00
dependabot[bot]
f4e360218f chore(deps): bump github.com/prometheus/common from 0.54.0 to 0.55.0 (#1530)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-01 14:54:05 +02:00
Andrey Burtasov
4efb502aab Add label user to process collector (#1472)
Co-authored-by: Jan-Otto Kröpke <github@jkroepke.de>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2024-06-17 11:05:52 +02:00
Jan-Otto Kröpke
a4a5ac464a Service V2 collector (#1497) 2024-06-14 08:59:11 +02:00
Jan-Otto Kröpke
1b438cdb82 Fix timezone caching issues (#1499) 2024-06-14 08:58:58 +02:00
dependabot[bot]
f0d0545d34 chore(deps): bump github.com/prometheus/common from 0.53.0 to 0.54.0 (#1519)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-10 16:00:11 +02:00
dependabot[bot]
d05effdda5 chore(deps): bump github.com/Microsoft/hcsshim from 0.12.3 to 0.12.4 (#1518)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-10 15:33:55 +02:00
dependabot[bot]
2d29f0004b chore(deps): bump golang.org/x/sys from 0.20.0 to 0.21.0 (#1517)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-10 15:33:36 +02:00
jacbo0112
d94640b6f8 Update README.md (#1501) 2024-06-07 18:46:02 +02:00
PrometheusBot
26e05e9d07 Synchronize common files from prometheus/prometheus (#1513) 2024-06-06 20:15:23 +02:00
PrometheusBot
9eec34d2f1 Synchronize common files from prometheus/prometheus (#1512)
Co-authored-by: Jan-Otto Kröpke <github@jkroepke.de>
2024-06-06 17:18:14 +02:00
Benjamin Nash
b8b164bfca Fix omission of RemoteFX Graphics metrics (#1511)
Co-authored-by: PrometheusBot <prometheus-team@googlegroups.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
Co-authored-by: Jan-Otto Kröpke <github@jkroepke.de>
2024-06-06 16:30:24 +02:00
PrometheusBot
7775ef95e6 Synchronize common files from prometheus/prometheus (#1510) 2024-06-03 20:19:25 +02:00
Ben Reedy
eeaefba0d7 feat: allow setting of exporter process priority (#1488) 2024-05-17 22:51:33 +02:00
Jan-Otto Kröpke
b2ab542b6d Fix docker push on release (#1496) 2024-05-17 19:38:51 +02:00
Soheil Rahmat
f85866ce1e Add Printer Exporter (#1485) 2024-05-17 18:47:32 +02:00
jacbo0112
9e24ce8f74 Update collector.diskdrive.md (#1495) 2024-05-17 07:17:26 +02:00
Darin Truckenmiller
9c65b7464f Add new RemoteFX network metrics (#1489) 2024-05-15 23:13:02 +02:00
Jan-Otto Kröpke
c242fae84c Remove push to quay.io (#1490) 2024-05-15 02:38:01 +02:00
Ben Reedy
c85cfaadde Merge pull request #1487 from breed808/ci
Additional CI linters
2024-05-15 07:13:19 +10:00
Ben Reedy
d6c24d1500 feat(ci): enable useful golangci output
Signed-off-by: Ben Reedy <breed808@breed808.com>
2024-05-15 07:07:32 +10:00
Ben Reedy
8b515ba54a feat(ci): enable promlinter
Signed-off-by: Ben Reedy <breed808@breed808.com>
2024-05-15 06:19:44 +10:00
Ben Reedy
a2575b93a9 feat(ci): add unused linter
Signed-off-by: Ben Reedy <breed808@breed808.com>
2024-05-15 06:19:39 +10:00
Ben Reedy
965be334bc ci: reverse default linter logic
Signed-off-by: Ben Reedy <breed808@breed808.com>
2024-05-15 06:19:34 +10:00
Ben Reedy
1239fbf719 perf: run perfsprint fixes
Signed-off-by: Ben Reedy <breed808@breed808.com>
2024-05-15 06:19:29 +10:00
Ben Reedy
a49dee606b perf: pre-allocate slices
Signed-off-by: Ben Reedy <breed808@breed808.com>
2024-05-15 06:19:24 +10:00
Ben Reedy
c713bed4e3 chore(ci): update golangci-lint to latest version
Signed-off-by: Ben Reedy <breed808@breed808.com>
2024-05-15 06:19:19 +10:00
Ben Reedy
99b6d215a2 chore(ci): order linters alphabetically
Signed-off-by: Ben Reedy <breed808@breed808.com>
2024-05-15 06:19:14 +10:00
dependabot[bot]
6d91cdc9fc chore(deps): bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1 (#1484)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-13 13:43:14 +02:00
PrometheusBot
c9ebc3e153 Synchronize common files from prometheus/prometheus (#1483) 2024-05-12 21:02:50 +02:00
Jan-Otto Kröpke
c99cf180d0 fix: makefile variable override (#1482) 2024-05-12 12:25:06 +02:00
Jan-Otto Kröpke
6e14d4e53f fix: release build again again (#1481) 2024-05-12 10:58:09 +02:00
PrometheusBot
48e7c34539 Synchronize common files from prometheus/prometheus (#1453)
Co-authored-by: Ben Kochie <superq@gmail.com>
2024-05-12 10:57:45 +02:00
Jan-Otto Kröpke
6497a1a5cc fix: release build again (#1480) 2024-05-12 10:04:33 +02:00
Jan-Otto Kröpke
4e1987686b fix: release build again (#1479) 2024-05-12 01:46:05 +02:00
Jan-Otto Kröpke
dc4bc8e163 fix: release build (#1478) 2024-05-12 01:27:54 +02:00
Jan-Otto Kröpke
b8747045ce Enable process V1 performance counters by default (#1477) 2024-05-12 00:24:52 +02:00
Jan-Otto Kröpke
00781dbbee scheduled_task: Move OLE connection to collect function (#1451) 2024-05-12 00:22:33 +02:00
Jan-Otto Kröpke
195cfa8d5c Add: push docker images to dockerhub and quay.io (#1469) 2024-05-11 23:06:22 +02:00
rob-scheepens
be25d79b71 Implementing smbclient collector (#1408)
Co-authored-by: Bob Allegretti <ballegre@gmail.com>
Co-authored-by: Jan-Otto Kröpke <github@jkroepke.de>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2024-05-11 18:06:03 +02:00
Ben Reedy
9bf84fb10c Remove unused *prometheus.Desc return value from collectors collect() function (#1475) 2024-05-11 12:05:45 +02:00
Jan-Otto Kröpke
b977c8484b Remove landing page (#1471) 2024-05-11 09:41:59 +02:00
Jan-Otto Kröpke
13ebec0195 fix: release builds (#1474) 2024-05-09 15:32:26 +02:00
Jan-Otto Kröpke
cc89ae33a4 fix: release builds (#1468) 2024-05-07 13:23:43 +02:00
85 changed files with 3130 additions and 1140 deletions

View File

@@ -0,0 +1,57 @@
---
name: Push README to Docker Hub
on:
  push:
    paths:
      - "README.md"
      - "README-containers.md"
      - ".github/workflows/container_description.yml"
    branches: [ main, master ]

permissions:
  contents: read

jobs:
  PushDockerHubReadme:
    runs-on: ubuntu-latest
    name: Push README to Docker Hub
    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
    steps:
      - name: git checkout
        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
      - name: Set docker hub repo name
        run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
      - name: Push README to Dockerhub
        uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
        env:
          DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }}
          DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }}
        with:
          destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
          provider: dockerhub
          short_description: ${{ env.DOCKER_REPO_NAME }}
          # Empty string results in README-containers.md being pushed if it
          # exists. Otherwise, README.md is pushed.
          readme_file: ''

  PushQuayIoReadme:
    runs-on: ubuntu-latest
    name: Push README to quay.io
    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
    steps:
      - name: git checkout
        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
      - name: Set quay.io org name
        run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
      - name: Set quay.io repo name
        run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
      - name: Push README to quay.io
        uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
        env:
          DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }}
        with:
          destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
          provider: quay
          # Empty string results in README-containers.md being pushed if it
          # exists. Otherwise, README.md is pushed.
          readme_file: ''

View File

@@ -30,8 +30,8 @@ jobs:
test:
runs-on: windows-2019
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
@@ -44,7 +44,6 @@ jobs:
Expand-Archive -Path promu-$($Env:PROMU_VER).windows-amd64.zip -DestinationPath .
Copy-Item -Path promu-$($Env:PROMU_VER).windows-amd64\promu.exe -Destination "$(go env GOPATH)\bin"
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.2.0
# GOPATH\bin dir must be appended to PATH else the `promu` command won't be found
echo "$(go env GOPATH)\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
@@ -54,8 +53,8 @@ jobs:
promtool:
runs-on: windows-2019
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
@@ -69,8 +68,6 @@ jobs:
Expand-Archive -Path promu-$($Env:PROMU_VER).windows-amd64.zip -DestinationPath .
Copy-Item -Path promu-$($Env:PROMU_VER).windows-amd64\promu.exe -Destination "$(go env GOPATH)\bin"
# No binaries available so build from source
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.2.0
# GOPATH\bin dir must be appended to PATH else the `promu` command won't be found
echo "$(go env GOPATH)\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
@@ -95,20 +92,13 @@ jobs:
git config --global core.autocrlf false
git config --global core.eol lf
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@v6
with:
version: v1.55.2
args: "--timeout=5m"
# golangci-lint action doesn't always provide helpful output, so re-run without the action for
# better output of the problem.
# The cache from the golangci-lint step is re-used here, so this step should finish quickly.
- name: errors
if: ${{ failure() }}
run: golangci-lint run --timeout=5m -c .golangci.yaml
version: v1.58
args: "--timeout=5m --out-format github-actions,colored-line-number"

View File

@@ -2,6 +2,10 @@ name: Releases
# Trigger on releases.
on:
push:
branches:
- master
pull_request:
release:
types:
- published
@@ -18,15 +22,40 @@ jobs:
build:
runs-on: windows-2022
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
# fetch-depth required for gitversion in `Build` step
fetch-depth: 0
- uses: actions/setup-go@v3
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
# https://github.com/pl4nty/Windows-Containers/blob/Main/helpful_tools/Install-BuildKit-GitHubActions/workflow.yaml
- name: Setup containerd
run: |
$version = "1.7.20"
curl.exe -L https://github.com/containerd/containerd/releases/download/v$version/containerd-$version-windows-amd64.tar.gz -o containerd.tar.gz
tar.exe xvf containerd.tar.gz
.\bin\containerd.exe --register-service
Start-Service containerd
- name: Setup BuildKit
run: |
$version = "v0.15.0"
curl.exe -L https://github.com/moby/buildkit/releases/download/$version/buildkit-$version.windows-amd64.tar.gz -o buildkit.tar.gz
tar.exe xvf buildkit.tar.gz
.\bin\buildkitd.exe --register-service
Start-Service buildkitd
- name: Setup Docker Buildx
run: |
$version = "v0.16.1"
curl.exe -L https://github.com/docker/buildx/releases/download/$version/buildx-$version.windows-amd64.exe -o $env:ProgramData\Docker\cli-plugins\docker-buildx.exe
- uses: docker/setup-buildx-action@v3
with:
driver: remote
endpoint: npipe:////./pipe/buildkitd
- name: Install WiX
run: dotnet tool install --global wix
@@ -37,60 +66,82 @@ jobs:
- name: Install Build deps
run: |
dotnet tool install --global GitVersion.Tool --version 5.*
Invoke-WebRequest -Uri https://github.com/prometheus/promu/releases/download/v$($Env:PROMU_VER)/promu-$($Env:PROMU_VER).windows-amd64.zip -OutFile promu-$($Env:PROMU_VER).windows-amd64.zip
Expand-Archive -Path promu-$($Env:PROMU_VER).windows-amd64.zip -DestinationPath .
Copy-Item -Path promu-$($Env:PROMU_VER).windows-amd64\promu.exe -Destination "$(go env GOPATH)\bin"
# No binaries available so build from source
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.4.0
# GOPATH\bin dir must be added to PATH else the `promu` and `goversioninfo` commands won't be found
# GOPATH\bin dir must be added to PATH else the `promu` commands won't be found
echo "$(go env GOPATH)\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Build
run: |
$ErrorActionPreference = "Stop"
dotnet-gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
$Version = Get-Content VERSION
# Windows versioninfo resources need the file version by parts (but product version is free text)
$VersionParts = ($Version -replace '^v?([0-9\.]+).*$','$1').Split(".")
goversioninfo.exe -ver-major $VersionParts[0] -ver-minor $VersionParts[1] -ver-patch $VersionParts[2] -product-version $Version -platform-specific
make crossbuild
$Version = git describe --tag
$Version = $Version -replace 'v', ''
# '+' symbols are invalid characters in image tags
(Get-Content -Path VERSION) -replace '\+', '_' | Set-Content -Path VERSION
$Version = $Version -replace '\+', '_'
$Version | Set-Content VERSION -PassThru
make build-all
# GH requires all files to have different names, so add version/arch to differentiate
foreach($Arch in "amd64", "arm64") {
Move-Item output\$Arch\windows_exporter.exe output\windows_exporter-$Version-$Arch.exe
}
Get-ChildItem -Path output
- name: Upload Artifacts
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: windows_exporter_binaries
path: output\windows_exporter-*.exe
- name: Build Release Artifacts
if: startsWith(github.ref, 'refs/tags/')
run: |
$ErrorActionPreference = "Stop"
$BuildVersion = Get-Content VERSION
$TagName = $env:GITHUB_REF -replace 'refs/tags/', ''
# The MSI version is not semver compliant, so just take the numerical parts
$MSIVersion = $TagName -replace '^v?([0-9\.]+).*$','$1'
$Version = Get-Content VERSION
foreach($Arch in "amd64", "arm64") {
Write-Verbose "Building windows_exporter $MSIVersion msi for $Arch"
.\installer\build.ps1 -PathToExecutable .\output\windows_exporter-$BuildVersion-$Arch.exe -Version $MSIVersion -Arch "$Arch"
Move-Item installer\windows_exporter-$MSIVersion-$Arch.msi output\
Write-Host "Building windows_exporter $Version msi for $Arch"
.\installer\build.ps1 -PathToExecutable .\output\windows_exporter-$Version-$Arch.exe -Version $Version -Arch "$Arch"
}
Move-Item installer\*.msi output\
Get-ChildItem -Path output\
promu checksum output\
- name: Build Docker Artifacts
run: make build-all
env:
VERSION: >-
${{
startsWith(github.ref, 'refs/tags/') && 'latest' ||
(
github.event_name == 'pull_request' && format('pr-{0}', github.event.number) || github.ref_name
)
}}
- name: Login to Docker Hub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_LOGIN }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
#- name: Login to quay.io
# if: ${{ github.event_name != 'pull_request' }}
# uses: docker/login-action@v3
# with:
# registry: quay.io
# username: 'robot'
# password: ${{ secrets.QUAY_IO_API_TOKEN }}
- name: Login to GitHub container registry
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -100,8 +151,8 @@ jobs:
if: ${{ github.event_name != 'pull_request' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VERSION: ${{ startsWith(github.ref, 'refs/tags/') && 'latest' || github.ref_name }}
run: |
$Env:VERSION = 'latest'
make push-all
- name: Release

View File

@@ -17,7 +17,7 @@ jobs:
name: Check for spelling errors
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: codespell-project/actions-codespell@master
with:
check_filenames: true

.gitignore vendored (1 addition)
View File

@@ -8,3 +8,4 @@ output/
*.syso
installer/*.msi
installer/*.wixpdb
local/

View File

@@ -1,14 +1,100 @@
linters:
disable-all: true
enable:
- deadcode
- errcheck
- revive
- govet
- gofmt
- ineffassign
- unconvert
- varcheck
enable-all: true
disable:
- asasalint
- asciicheck
- bidichk
- bodyclose
- canonicalheader
- containedctx
- contextcheck
- copyloopvar
- cyclop
- decorder
- depguard
- dogsled
- dupl
- dupword
- durationcheck
- err113
- errchkjson
- errname
- errorlint
- exhaustive
- exhaustruct
- exportloopref
- fatcontext
- forbidigo
- forcetypeassert
- funlen
- gci
- ginkgolinter
- gocheckcompilerdirectives
- gochecknoglobals
- gochecknoinits
- gochecksumtype
- gocognit
- goconst
- gocritic
- gocyclo
- godot
- godox
- gofumpt
- goheader
- goimports
- gomoddirectives
- gomodguard
- goprintffuncname
- gosec
- gosimple
- gosmopolitan
- grouper
- importas
- inamedparam
- interfacebloat
- intrange
- ireturn
- lll
- maintidx
- makezero
- mirror
- misspell
- mnd
- musttag
- nakedret
- nestif
- nlreturn
- noctx
- nolintlint
- nonamedreturns
- nosprintfhostport
- paralleltest
- predeclared
- protogetter
- reassign
- rowserrcheck
- sloglint
- spancheck
- sqlclosecheck
- staticcheck
- stylecheck
- tagalign
- tagliatelle
- tenv
- testableexamples
- testifylint
- testpackage
- thelper
- tparallel
- usestdlibvars
- varnamelen
- wastedassign
- whitespace
- wrapcheck
- wsl
- zerologlint
- execinquery
- gomnd
issues:
exclude:

View File

@@ -1,7 +1,7 @@
# Note this image doesn't really matter for hostprocess but it is good to build per OS version
# the files in the image are copied to $env:CONTAINER_SANDBOX_MOUNT_POINT on the host
# but the file system is the Host NOT the container
ARG BASE="mcr.microsoft.com/windows/nanoserver:1809"
ARG BASE="mcr.microsoft.com/windows/nanoserver:ltsc2022"
FROM $BASE
ENV PATH="C:\Windows\system32;C:\Windows;"

View File

@@ -1,15 +1,19 @@
export GOOS=windows
export DOCKER_IMAGE_NAME ?= windows-exporter
export DOCKER_REPO ?= ghcr.io/prometheus-community
GOOS ?= windows
VERSION ?= $(shell cat VERSION)
DOCKER ?= docker
VERSION?=$(shell cat VERSION)
DOCKER?=docker
# DOCKER_REPO is the official image repository name at docker.io, quay.io.
DOCKER_REPO ?= prometheuscommunity
DOCKER_IMAGE_NAME ?= windows-exporter
# Image Variables for Hostprocess Container
# ALL_DOCKER_REPOS is the list of repositories to push the image to. ghcr.io requires that org name be the same as the image repo name.
ALL_DOCKER_REPOS ?= docker.io/$(DOCKER_REPO) ghcr.io/prometheus-community # quay.io/$(DOCKER_REPO)
# Image Variables for host process Container
# Windows image build is heavily influenced by https://github.com/kubernetes/kubernetes/blob/master/cluster/images/etcd/Makefile
OS=1809
ALL_OS:= 1809 ltsc2022
BASE_IMAGE=mcr.microsoft.com/windows/nanoserver
OS ?= ltsc2019
ALL_OS ?= ltsc2019 ltsc2022
BASE_IMAGE ?= mcr.microsoft.com/windows/nanoserver
.PHONY: build
build: generate windows_exporter.exe
@@ -25,7 +29,7 @@ test:
go test -v ./...
bench:
go test -v -bench='benchmarkcollector' ./pkg/collector/{cpu,logical_disk,physical_disk,logon,memory,net,process,service,system,tcp,time}
go test -v -bench='benchmarkcollector' ./pkg/collector/{cpu,logical_disk,physical_disk,logon,memory,net,printer,process,service,system,tcp,time}
lint:
golangci-lint -c .golangci.yaml run
@@ -47,22 +51,46 @@ crossbuild: generate
GOARCH=amd64 promu build --prefix=output/amd64
GOARCH=arm64 promu build --prefix=output/arm64
.PHONY: package
package: crossbuild
powershell -NonInteractive -ExecutionPolicy Bypass -File .\installer\build.ps1 -PathToExecutable .\output\amd64\windows_exporter.exe -Version $(shell git describe --tags --abbrev=0)
build-image: crossbuild
$(DOCKER) build --build-arg=BASE=$(BASE_IMAGE):$(OS) -f Dockerfile -t $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$(OS) .
$(DOCKER) build --build-arg=BASE=$(BASE_IMAGE):$(OS) -f Dockerfile -t local/$(DOCKER_IMAGE_NAME):$(VERSION)-$(OS) .
build-hostprocess:
$(DOCKER) buildx build --build-arg=BASE=mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 -f Dockerfile -t local/$(DOCKER_IMAGE_NAME):$(VERSION)-hostprocess .
sub-build-%:
$(MAKE) OS=$* build-image
build-all: $(addprefix sub-build-,$(ALL_OS))
build-all: $(addprefix sub-build-,$(ALL_OS)) build-hostprocess
push:
set -x; \
for osversion in ${ALL_OS}; do \
$(DOCKER) push $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
$(DOCKER) manifest create --amend $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION) $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
full_version=`$(DOCKER) manifest inspect $(BASE_IMAGE):$${osversion} | grep "os.version" | head -n 1 | awk -F\" '{print $$4}'` || true; \
$(DOCKER) manifest annotate --os windows --arch amd64 --os-version $${full_version} $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION) $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
for docker_repo in ${DOCKER_REPO}; do \
for osversion in ${ALL_OS}; do \
$(DOCKER) tag local/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion} $${docker_repo}/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
$(DOCKER) push $${docker_repo}/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
$(DOCKER) manifest create --amend $${docker_repo}/$(DOCKER_IMAGE_NAME):$(VERSION) $${docker_repo}/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
full_version=`$(DOCKER) manifest inspect $(BASE_IMAGE):$${osversion} | grep "os.version" | head -n 1 | awk -F\" '{print $$4}'` || true; \
$(DOCKER) manifest annotate --os windows --arch amd64 --os-version $${full_version} $${docker_repo}/$(DOCKER_IMAGE_NAME):$(VERSION) $${docker_repo}/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
done; \
$(DOCKER) manifest push --purge $${docker_repo}/$(DOCKER_IMAGE_NAME):$(VERSION); \
done
$(DOCKER) manifest push --purge $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)
push-all: build-all push
# We can't load the image into the local docker store, so we have to build and push it in one go
push-hostprocess:
set -x; \
for docker_repo in ${DOCKER_REPO}; do \
$(DOCKER) buildx build --push --build-arg=BASE=mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 -f Dockerfile -t $${docker_repo}/$(DOCKER_IMAGE_NAME):$(VERSION)-hostprocess .; \
done
.PHONY: push-all
push-all: build-all
$(MAKE) DOCKER_REPO="$(ALL_DOCKER_REPOS)" push # push-hostprocess - disabled until it works on Windows
# Mandatory target for container description sync action
.PHONY: docker-repo-name
docker-repo-name:
@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"

View File

@@ -4,7 +4,6 @@
A Prometheus exporter for Windows machines.
## Collectors
Name | Description | Enabled by default
@@ -17,6 +16,7 @@ Name | Description | Enabled by default
[cpu_info](docs/collector.cpu_info.md) | CPU Information |
[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | &#10003;
[container](docs/collector.container.md) | Container metrics |
[diskdrive](docs/collector.diskdrive.md) | Diskdrive metrics |
[dfsr](docs/collector.dfsr.md) | DFSR metrics |
[dhcp](docs/collector.dhcp.md) | DHCP Server |
[dns](docs/collector.dns.md) | DNS Server |
@@ -24,6 +24,7 @@ Name | Description | Enabled by default
[fsrmquota](docs/collector.fsrmquota.md) | Microsoft File Server Resource Manager (FSRM) Quotas collector |
[hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
[iis](docs/collector.iis.md) | IIS sites and applications |
[license](docs/collector.license.md) | Windows license status |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | &#10003;
[logon](docs/collector.logon.md) | User logon sessions |
[memory](docs/collector.memory.md) | Memory usage metrics |
@@ -44,11 +45,14 @@ Name | Description | Enabled by default
[netframework_clrsecurity](docs/collector.netframework_clrsecurity.md) | .NET Framework Security Check metrics |
[net](docs/collector.net.md) | Network interface I/O | &#10003;
[os](docs/collector.os.md) | OS metrics (memory, processes, users) | &#10003;
[physical_disk](docs/collector.physical_disk.md) | physical disk metrics | &#10003;
[printer](docs/collector.printer.md) | Printer metrics |
[process](docs/collector.process.md) | Per-process metrics |
[remote_fx](docs/collector.remote_fx.md) | RemoteFX protocol (RDP) metrics |
[scheduled_task](docs/collector.scheduled_task.md) | Scheduled Tasks metrics |
[service](docs/collector.service.md) | Service state metrics | &#10003;
[smb](docs/collector.smb.md) | SMB Server |
[smbclient](docs/collector.smbclient.md) | SMB Client |
[smtp](docs/collector.smtp.md) | IIS SMTP Server |
[system](docs/collector.system.md) | System calls | &#10003;
[tcp](docs/collector.tcp.md) | TCP connections |
@@ -81,17 +85,17 @@ This can be useful for having different Prometheus servers collect specific metr
windows_exporter accepts flags to configure certain behaviours. The ones configuring the global behaviour of the exporter are listed below, while collector-specific ones are documented in the respective collector documentation above.
Flag | Description | Default value
---------|-------------|--------------------
`--web.listen-address` | host:port for exporter. | `:9182`
`--telemetry.path` | URL path for surfacing collected metrics. | `/metrics`
`--telemetry.max-requests` | Maximum number of concurrent requests. 0 to disable. | `5`
`--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default." | `[defaults]`
`--collectors.print` | If true, print available collectors and exit. |
`--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5`
`--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None
`--config.file` | [Using a config file](#using-a-configuration-file) from path or URL | None
`--config.file.insecure-skip-verify` | Skip TLS when loading config file from URL | false
| Flag | Description | Default value |
|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
| `--web.listen-address` | host:port for exporter. | `:9182` |
| `--telemetry.path` | URL path for surfacing collected metrics. | `/metrics` |
| `--telemetry.max-requests` | Maximum number of concurrent requests. 0 to disable. | `5` |
| `--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default." | `[defaults]` |
| `--collectors.print` | If true, print available collectors and exit. | |
| `--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5` |
| `--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None |
| `--config.file` | [Using a config file](#using-a-configuration-file) from path or URL | None |
| `--config.file.insecure-skip-verify` | Skip TLS when loading config file from URL | false |
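
For orientation, a hedged example of combining several of these flags follows; the collector list and addresses are placeholders, not recommendations, and every flag shown is taken from the table above.

```powershell
# Run the exporter with the default collectors plus the process collector,
# serving metrics on the default port under /metrics.
.\windows_exporter.exe `
  --collectors.enabled="[defaults],process" `
  --web.listen-address=":9182" `
  --telemetry.path="/metrics"
```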
## Installation
The latest release can be downloaded from the [releases page](https://github.com/prometheus-community/windows_exporter/releases).
@@ -100,15 +104,17 @@ Each release provides a .msi installer. The installer will setup the windows_exp
If the installer is run without any parameters, the exporter will run with default settings for enabled collectors, ports, etc. The following parameters are available:
Name | Description
-----|------------
`ENABLED_COLLECTORS` | As the `--collectors.enabled` flag, provide a comma-separated list of enabled collectors
`LISTEN_ADDR` | The IP address to bind to. Defaults to 0.0.0.0
`LISTEN_PORT` | The port to bind to. Defaults to 9182.
`METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics`
`TEXTFILE_DIRS` | As the `--collector.textfile.directories` flag, provide a directory to read text files with metrics from
`REMOTE_ADDR` | Allows setting comma separated remote IP addresses for the Windows Firewall exception (allow list). Defaults to an empty string (any remote address).
`EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string.
| Name | Description |
|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
| `ENABLED_COLLECTORS` | As the `--collectors.enabled` flag, provide a comma-separated list of enabled collectors |
| `LISTEN_ADDR`                    | The IP address to bind to. Defaults to an empty string (any local address).                                                                            |
| `LISTEN_PORT` | The port to bind to. Defaults to `9182`. |
| `METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics` |
| `TEXTFILE_DIRS` | As the `--collector.textfile.directories` flag, provide a directory to read text files with metrics from |
| `REMOTE_ADDR` | Allows setting comma separated remote IP addresses for the Windows Firewall exception (allow list). Defaults to an empty string (any remote address). |
| `EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string. |
| `ADD_FIREWALL_EXCEPTION`         | Sets up a firewall exception for windows_exporter. Defaults to `yes`.                                                                                  |
| `ENABLE_V1_PERFORMANCE_COUNTERS` | Enables V1 performance counter on modern systems. Defaults to `yes`. |
Parameters are sent to the installer via `msiexec`. Example invocations:
@@ -133,6 +139,12 @@ To install the exporter with creating a firewall exception, use the following co
msiexec /i <path-to-msi-file> ADD_FIREWALL_EXCEPTION=yes
```
To repair an installation, e.g. to force re-creating the Windows service:
```powershell
msiexec /fa <path-to-msi-file>
```
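
As a sketch only, the installer parameters from the table above could also be combined into a single unattended install; the MSI path and collector list are placeholders.

```powershell
# Hypothetical invocation: enable selected collectors, keep the firewall exception,
# and enable the V1 performance counters (parameter names from the table above).
msiexec /i <path-to-msi-file> `
  ENABLED_COLLECTORS=os,service `
  ADD_FIREWALL_EXCEPTION=yes `
  ENABLE_V1_PERFORMANCE_COUNTERS=yes
```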
Powershell versions 7.3 and above require [PSNativeCommandArgumentPassing](https://learn.microsoft.com/en-us/powershell/scripting/learn/experimental-features?view=powershell-7.3) to be set to `Legacy` when using `--% EXTRA_FLAGS`:
@@ -141,6 +153,21 @@ $PSNativeCommandArgumentPassing = 'Legacy'
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,service --% EXTRA_FLAGS="--collector.service.services-where ""Name LIKE 'sql%'"""
```
## Docker Implementation
The windows_exporter can be run as a Docker container. The Docker image is available on
* [Docker Hub](https://hub.docker.com/r/prometheuscommunity/windows-exporter): `docker.io/prometheuscommunity/windows-exporter`
* [GitHub Container Registry](https://github.com/prometheus-community/windows_exporter/pkgs/container/windows-exporter): `ghcr.io/prometheus-community/windows-exporter`
<!-- * [quay.io Registry](https://quay.io/repository/prometheuscommunity/windows-exporter): `quay.io/prometheuscommunity/windows-exporter` -->
### Tags
The Docker image is tagged with the version of the exporter. The `latest` tag is also available and points to the latest release.
Additionally, a `hostprocess` flavor with the `-hostprocess` tag suffix is based on https://github.com/microsoft/windows-host-process-containers-base-image,
which is designed to run as a Windows host process container. That image is smaller than the default one.
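
A minimal sketch of running the image on a Windows container host follows, assuming Docker with Windows containers and the default metrics port; the container name is arbitrary.

```powershell
# Pull and run the exporter container, publishing the default metrics port to the host.
docker run -d `
  --name windows_exporter `
  -p 9182:9182 `
  ghcr.io/prometheus-community/windows-exporter:latest
```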
## Kubernetes Implementation
See detailed steps to install on Windows Kubernetes [here](./kubernetes/kubernetes.md).

View File

@@ -35,7 +35,7 @@ Name | Description | Type | Labels
`windows_cpu_processor_mperf_total` | Processor MPerf Total is proportioanl to the number of TSC ticks each core has accumulated while executing instructions. Due to the manner in which it is presented, it should be scaled by 1e2 to properly line up with Processor Performance Total. As above, it is believed to be closely related to the MPERF MSR. | counter | `core`
`windows_cpu_processor_rtc_total` | RTC total is assumed to represent the 64Hz tick rate in Windows. It is not by itself useful, but can be used with `windows_cpu_processor_utility_total` to more accurately measure CPU utilisation than with `windows_cpu_time_total` | counter | `core`
`windows_cpu_processor_utility_total` | Processor Utility Total is a newer, more accurate measure of CPU utilization, in particular handling modern CPUs with variant CPU frequencies. The rate of this counter divided by the rate of `windows_cpu_processor_rtc_total` should provide an accurate view of CPU utilisation on modern systems, as observed in Task Manager. | counter | `core`
`windows_cpu_processor_privileged_utility_total` | Processor Privilged Utility Total, when used in a similar fashion to `windows_cpu_processor_utility_total` will show the portion of CPU utilization which is happening in privileged mode. | counter | `core`
`windows_cpu_processor_privileged_utility_total` | Processor Privileged Utility Total, when used in a similar fashion to `windows_cpu_processor_utility_total` will show the portion of CPU utilization which is happening in privileged mode. | counter | `core`
### Example metric
Show frequency of host CPU cores

View File

@@ -5,7 +5,7 @@ The diskdrive collector exposes metrics about physical disks
| | |
| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| Metric name prefix | `diskdrive` |
| Classes | [`Win32_PerfRawData_DNS_DNS`](https://learn.microsoft.com/en-us/windows/win32/cimwin32prov/win32-diskdrive) |
| Classes | [`Win32_DiskDrive`](https://learn.microsoft.com/en-us/windows/win32/cimwin32prov/win32-diskdrive) |
| Enabled by default? | No |
## Flags
@@ -16,11 +16,11 @@ None
| Name | Description | Type | Labels |
| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------ |
| `disk_drive_info` | General identifiable information about the disk drive | gauge | name,caption,device_id,model |
| `disk_drive_availability` | The disk drive's current availability | gauge | name,availability |
| `disk_drive_partitions` | Number of partitions on the drive | gauge | name |
| `disk_drive_size` | Size of the disk drive. It is calculated by multiplying the total number of cylinders, tracks in each cylinder, sectors in each track, and bytes in each sector. | gauge | name |
| `disk_drive_status` | Operational status of the drive | gauge | name,status |
| `diskdrive_info` | General identifiable information about the disk drive | gauge | name,caption,device_id,model |
| `diskdrive_availability` | The disk drive's current availability | gauge | name,availability |
| `diskdrive_partitions` | Number of partitions on the drive | gauge | name |
| `diskdrive_size` | Size of the disk drive. It is calculated by multiplying the total number of cylinders, tracks in each cylinder, sectors in each track, and bytes in each sector. | gauge | name |
| `diskdrive_status` | Operational status of the drive | gauge | name,status |
## Alerting examples
**prometheus.rules**

docs/collector.license.md (new file, 53 additions)
View File

@@ -0,0 +1,53 @@
# license collector
The license collector exposes metrics about the Windows license status.
|||
-|-
Metric name prefix | `license`
Data source | Win32
Enabled by default? | No
## Flags
None
## Metrics
| Name | Description | Type | Labels |
|--------------------------|----------------|-------|---------|
| `windows_license_status` | license status | gauge | `state` |
### Example metric
```
# HELP windows_license_status Status of windows license
# TYPE windows_license_status gauge
windows_license_status{state="genuine"} 1
windows_license_status{state="invalid_license"} 0
windows_license_status{state="last"} 0
windows_license_status{state="offline"} 0
windows_license_status{state="tampered"} 0
```
## Useful queries
Show if the license is genuine
```
windows_license_status{state="genuine"}
```
## Alerting examples
**prometheus.rules**
```yaml
- alert: "WindowsLicense"
  expr: 'windows_license_status{state="genuine"} == 0'
  for: "10m"
  labels:
    severity: "high"
  annotations:
    summary: "Windows system license is not genuine"
    description: "The Windows system license is not genuine. Please check the license status."
```

View File

@@ -23,6 +23,7 @@ If given, a disk needs to *not* match the exclude regexp in order for the corres
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_logical_disk_info` | A metric with a constant '1' value labeled with logical disk information | gauge | `disk`,`filesystem`,`serial_number`,`volume`,`volume_name`,`type`
`windows_logical_disk_requests_queued` | Number of requests outstanding on the disk at the time the performance data is collected | gauge | `volume`
`windows_logical_disk_avg_read_requests_queued` | Average number of read requests that were queued for the selected disk during the sample interval | gauge | `volume`
`windows_logical_disk_avg_write_requests_queued` | Average number of write requests that were queued for the selected disk during the sample interval | gauge | `volume`
@@ -36,6 +37,7 @@ Name | Description | Type | Labels
`windows_logical_disk_size_bytes` | Total size of the disk in bytes (not real time, updates every 10-15 min) | gauge | `volume`
`windows_logical_disk_idle_seconds_total` | Seconds the disk was idle (not servicing read/write requests) | counter | `volume`
`windows_logical_disk_split_ios_total` | Number of I/Os to the disk split into multiple I/Os | counter | `volume`
`windows_logical_disk_readonly` | Whether the logical disk is read-only | gauge | `volume`
### Warning about size metrics
The `free_bytes` and `size_bytes` metrics are not updated in real time and might have a delay of 10-15min.
@@ -47,6 +49,15 @@ Query the rate of write operations to a disk
rate(windows_logical_disk_read_bytes_total{instance="localhost", volume=~"C:"}[2m])
```
Logical Volume information
```
windows_logical_disk_info{disk_id="0",filesystem="",serial_number="",type="",volume="HarddiskVolume2",volume_name=""} 1
windows_logical_disk_info{disk_id="0",filesystem="",serial_number="",type="",volume="HarddiskVolume3",volume_name=""} 1
windows_logical_disk_info{disk_id="0",filesystem="NTFS",serial_number="668EEC37",type="fixed",volume="C:",volume_name="Windows"} 1
windows_logical_disk_info{disk_id="1",filesystem="NTFS",serial_number="50AE953B",type="fixed",volume="D:",volume_name="Temporary Storage"} 1
windows_logical_disk_info{disk_id="1",filesystem="ReFS",serial_number="C69B59AD",type="fixed",volume="G:",volume_name="Volume"} 1
```
## Useful queries
Calculate rate of total IOPS for disk
```

docs/collector.printer.md (new file, 28 additions)
View File

@@ -0,0 +1,28 @@
# printer collector
The printer collector exposes metrics about printers and their jobs.
| | |
|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Metric name prefix | `printer` |
| Data source | WMI |
| Classes | [Win32_Printer](https://learn.microsoft.com/en-us/windows/win32/cimwin32prov/win32-printer) <br> [Win32_PrintJob](https://learn.microsoft.com/en-us/windows/win32/cimwin32prov/win32-printjob) |
| Enabled by default? | false |
## Flags
### `--collector.printer.include`
If given, a printer needs to match the include regexp in order for the corresponding printer metrics to be reported
### `--collector.printer.exclude`
If given, a printer needs to *not* match the exclude regexp in order for the corresponding printer metrics to be reported
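
For illustration, a hedged example of running the exporter with only this collector and an include filter; the printer name pattern is a placeholder.

```powershell
# Enable only the printer collector and restrict it to printers whose names start with "HP".
.\windows_exporter.exe `
  --collectors.enabled="printer" `
  --collector.printer.include="HP.*"
```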
## Metrics
Name | Description | Type | Labels
-----|-------------|---------|-------
`windows_printer_status` | Status of the printer at the time the performance data is collected | counter | `printer`, `status`
`windows_printer_job_count` | Number of jobs processed by the printer since the last reset | gauge | `printer`
`windows_printer_job_status` | A counter of printer jobs by status | gauge | `printer`, `status`

View File

@@ -2,16 +2,23 @@
The process collector exposes metrics about processes.
|||
-|-
Metric name prefix | `process`
Data source | Perflib
Counters | `Process`
Enabled by default? | No
Note, on Windows Server 2022, the `Process` counter set is disabled by default. To enable it, run the following command in an elevated PowerShell session:
```powershell
lodctr.exe /E:Lsa
lodctr.exe /E:PerfProc
lodctr.exe /R
```
| | |
|---------------------|-----------|
| Metric name prefix | `process` |
| Data source | Perflib |
| Counters | `Process` |
| Enabled by default? | No |
## Flags
### `--collector.process.include`
Regexp of processes to include. Process name must both match `include` and not
@@ -30,6 +37,12 @@ Enables IIS process name queries. IIS process names are combined with their app
Disabled by default, and can be enabled with `--collector.process.iis=true`.
### `--collector.process.report-owner`
Enables reporting of the process owner. This is a potentially expensive operation.
Disabled by default, and can be enabled with `--collector.process.report-owner`.
### Example
To match all firefox processes: `--collector.process.include="firefox.*"`.
Note that multiple processes with the same name will be disambiguated by
@@ -61,23 +74,23 @@ w3wp_Test
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_process_start_time` | Time of process start | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_cpu_time_total` | Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user). An instruction is the basic unit of execution in a computer, a thread is the object that executes instructions, and a process is the object created when a program is run. Code executed to handle some hardware interrupts and trap conditions is included in this count. | counter | `process`, `process_id`, `creating_process_id`
`windows_process_handles` | Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process. | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_io_bytes_total` | Bytes issued to I/O operations in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. | counter | `process`, `process_id`, `creating_process_id`
`windows_process_io_operations_total` | I/O operations issued in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. | counter | `process`, `process_id`, `creating_process_id`
`windows_process_page_faults_total` | Page faults by the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. This can cause the page not to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with which the page is shared. | counter | `process`, `process_id`, `creating_process_id`
`windows_process_page_file_bytes` | Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_pool_bytes` | Pool Bytes is the last observed number of bytes in the paged or nonpaged pool. The nonpaged pool is an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. The paged pool is an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. Nonpaged pool bytes is calculated differently than paged pool bytes, so it might not equal the total of paged pool bytes. | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_priority_base` | Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process. | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_private_bytes` | Current number of bytes this process has allocated that cannot be shared with other processes. | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_threads` | Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread. | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_virtual_bytes` | Current size, in bytes, of the virtual address space that the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries. | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_working_set_private_bytes` | Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes. | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_working_set_peak_bytes` | Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the Working Set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from Working Sets. If they are needed they will then be soft-faulted back into the Working Set before they leave main memory. | gauge | `process`, `process_id`, `creating_process_id`
`windows_process_working_set_bytes` | Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `process`, `process_id`, `creating_process_id`
| Name | Description | Type | Labels |
|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-----------------------------------------------------------------|
| `windows_process_start_time` | Time of process start | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_cpu_time_total` | Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user). An instruction is the basic unit of execution in a computer, a thread is the object that executes instructions, and a process is the object created when a program is run. Code executed to handle some hardware interrupts and trap conditions is included in this count. | counter | `process`, `process_id`, `creating_process_id`, `owner`, `mode` |
| `windows_process_handles` | Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process. | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_io_bytes_total` | Bytes issued to I/O operations in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. | counter | `process`, `process_id`, `creating_process_id`, `owner`, `mode` |
| `windows_process_io_operations_total` | I/O operations issued in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. | counter | `process`, `process_id`, `creating_process_id`, `owner`, `mode` |
| `windows_process_page_faults_total` | Page faults by the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. This can cause the page not to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with which the page is shared. | counter | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_page_file_bytes` | Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_pool_bytes` | Pool Bytes is the last observed number of bytes in the paged or nonpaged pool. The nonpaged pool is an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. The paged pool is an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. Nonpaged pool bytes is calculated differently than paged pool bytes, so it might not equal the total of paged pool bytes. | gauge | `process`, `process_id`, `creating_process_id`, `owner`, `pool` |
| `windows_process_priority_base` | Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process. | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_private_bytes` | Current number of bytes this process has allocated that cannot be shared with other processes. | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_threads` | Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread. | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_virtual_bytes` | Current size, in bytes, of the virtual address space that the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries. | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_working_set_private_bytes` | Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes. | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_working_set_peak_bytes` | Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the Working Set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from Working Sets. If they are needed they will then be soft-faulted back into the Working Set before they leave main memory. | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
| `windows_process_working_set_bytes` | Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `process`, `process_id`, `creating_process_id`, `owner` |
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

View File

@@ -24,10 +24,15 @@ Name | Description | Type | Labels
`windows_remote_fx_net_current_tcp_rtt_seconds` | Average TCP round-trip time (RTT) detected in seconds. | gauge | `session_name`
`windows_remote_fx_net_current_udp_bandwidth` | UDP Bandwidth detected in bytes per second. | gauge | `session_name`
`windows_remote_fx_net_current_udp_rtt_seconds` | Average UDP round-trip time (RTT) detected in seconds. | gauge | `session_name`
`windows_remote_fx_net_received_bytes_total` | _Not yet documented_ | counter | `session_name`
`windows_remote_fx_net_sent_bytes_total` | _Not yet documented_ | counter | `session_name`
`windows_remote_fx_net_received_bytes_total` | Total bytes received over the network session. | counter | `session_name`
`windows_remote_fx_net_sent_bytes_total` | Total bytes sent over the network session. | counter | `session_name`
`windows_remote_fx_net_udp_packets_received_total` | Rate in packets per second at which packets are received over UDP. | counter | `session_name`
`windows_remote_fx_net_udp_packets_sent_total` | Rate in packets per second at which packets are sent over UDP. | counter | `session_name`
`windows_remote_fx_net_loss_rate` | Network packet loss rate detected over the RemoteFX session, expressed as a percentage. | counter | `session_name`
`windows_remote_fx_net_fec_rate` | Forward Error Correction (FEC) rate applied to packets sent over the RemoteFX session, expressed as a percentage. | counter | `session_name`
`windows_remote_fx_net_retransmission_rate` | Rate of packets retransmitted over the RemoteFX session, expressed as a percentage. | counter | `session_name`
## Metrics (Graphics)

View File

@@ -2,11 +2,9 @@
The service collector exposes metrics about Windows Services
|||
-|-
Metric name prefix | `service`
Classes | [`Win32_Service`](https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx)
Enabled by default? | Yes
The collector exists in two different versions. Version 1 uses WMI to query all services and can provide additional
information. Version 2 is more efficient because it connects directly to the service manager, but it cannot
provide additional information such as `run_as` or the start configuration.
## Flags
@@ -22,6 +20,19 @@ Example config win_exporter.yml for multiple services: `services-where: Name='SQ
Uses API calls instead of WMI for performance optimization. **Note** the previous flag (`--collector.service.services-where`) won't have any effect on this mode.
### `--collector.service.v2`
Version 2 of the service collector. It uses API calls for performance optimization. **Note** the previous flag (`--collector.service.services-where`) won't have any effect on this mode.
For performance reasons, it does not provide additional information such as `run_as` or the start configuration.
# collector V1
|||
-|-
Metric name prefix | `service`
Classes | [`Win32_Service`](https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx)
Enabled by default? | Yes
## Metrics
Name | Description | Type | Labels
@@ -91,6 +102,53 @@ Counts the number of Microsoft SQL Server/Agent Processes
count(windows_service_state{exported_name=~"(sqlserveragent|mssqlserver)",state="running"})
```
# collector V2
|||
-|-
Metric name prefix | `service`
Classes | none
Enabled by default? | No
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_service_state` | The state of the service, 1 if the service is in the given state, 0 otherwise | gauge | `name`, `display_name`, `state`
### States
A service can be in the following states:
- `stopped`
- `start pending`
- `stop pending`
- `running`
- `continue pending`
- `pause pending`
- `paused`
- `unknown`
### Example metric
```
windows_service_state{display_name="Declared Configuration(DC) service",name="dcsvc",status="continue pending"} 0
windows_service_state{display_name="Declared Configuration(DC) service",name="dcsvc",status="pause pending"} 0
windows_service_state{display_name="Declared Configuration(DC) service",name="dcsvc",status="paused"} 0
windows_service_state{display_name="Declared Configuration(DC) service",name="dcsvc",status="running"} 0
windows_service_state{display_name="Declared Configuration(DC) service",name="dcsvc",status="start pending"} 0
windows_service_state{display_name="Declared Configuration(DC) service",name="dcsvc",status="stop pending"} 0
windows_service_state{display_name="Declared Configuration(DC) service",name="dcsvc",status="stopped"} 1
```
## Useful queries
Counts the number of Microsoft SQL Server/Agent Processes
```
count(windows_service_state{name=~"(sqlserveragent|mssqlserver)",state="running"})
```
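Because the collector exports one series per state (see the list of states above), services that are currently stopped can be found with a simple equality filter; a minimal sketch:
```
# Services currently reported in the "stopped" state.
windows_service_state{state="stopped"} == 1
```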
## Alerting examples
**prometheus.rules**
```yaml
@@ -100,7 +158,7 @@ groups:
# Sends an alert when the 'sqlserveragent' service is not in the running state for 3 minutes.
- alert: SQL Server Agent DOWN
expr: windows_service_state{instance="SQL",exported_name="sqlserveragent",state="running"} == 0
expr: windows_service_state{instance="SQL",name="sqlserveragent",state="running"} == 0
for: 3m
labels:
severity: high
@@ -110,7 +168,7 @@ groups:
# Sends an alert when the 'mssqlserver' service is not in the running state for 3 minutes.
- alert: SQL Server DOWN
expr: windows_service_state{instance="SQL",exported_name="mssqlserver",state="running"} == 0
expr: windows_service_state{instance="SQL",name="mssqlserver",state="running"} == 0
for: 3m
labels:
severity: high

View File

@@ -0,0 +1,50 @@
# smbclient collector
The smbclient collector collects metrics from MS SmbClient hosts through perflib
|||
-|-
Metric name prefix | `windows_smbclient`
Classes | [Win32_PerfRawData_SMB](https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-smb/)<br/>
Enabled by default? | No
## Flags
### `--collectors.smbclient.list`
Lists the Perflib objects that are queried for data along with the perflib object id
### `--collectors.smbclient.enabled`
Comma-separated list of collectors to use, for example: `--collectors.smbclient.enabled=ServerShares`. Matching is case-sensitive. Depending on the smb protocol version not all performance counters may be available. Use `--collectors.smbclient.list` to obtain a list of supported collectors.
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_smbclient_data_queue_seconds_total` | Seconds requests waited on queue on this share | counter | `server`, `share`|
`windows_smbclient_read_queue_seconds_total` | Seconds read requests waited on queue on this share | counter | `server`, `share`|
`windows_smbclient_write_queue_seconds_total` | Seconds write requests waited on queue on this share | counter | `server`, `share`|
`windows_smbclient_request_seconds_total` | Seconds waiting for requests on this share | counter | `server`, `share`|
`windows_smbclient_stalls_total` | The number of requests delayed based on insufficient credits on this share | counter | `server`, `share`|
`windows_smbclient_requests_queued` | The point in time (current) number of requests outstanding on this share | counter | `server`, `share`|
`windows_smbclient_data_bytes_total` | The bytes read or written on this share | counter | `server`, `share`|
`windows_smbclient_requests_total` | The requests on this share | counter | `server`, `share`|
`windows_smbclient_metadata_requests_total` | The metadata requests on this share | counter | `server`, `share`|
`windows_smbclient_read_bytes_via_smbdirect_total` | The bytes read from this share via RDMA direct placement | TBD | `server`, `share`|
`windows_smbclient_read_bytes_total` | The bytes read on this share | counter | `server`, `share`|
`windows_smbclient_read_requests_via_smbdirect_total` | The read requests on this share via RDMA direct placement | TBD | `server`, `share`|
`windows_smbclient_read_requests_total` | The read requests on this share | counter | `server`, `share`|
`windows_smbclient_turbo_io_reads_total` | The read requests that go through Turbo I/O | TBD | `server`, `share`|
`windows_smbclient_turbo_io_writes_total` | The write requests that go through Turbo I/O | TBD | `server`, `share`|
`windows_smbclient_write_bytes_via_smbdirect_total` | The written bytes to this share via RDMA direct placement | TBD | `server`, `share`|
`windows_smbclient_write_bytes_total` | The bytes written on this share | counter | `server`, `share`|
`windows_smbclient_write_requests_via_smbdirect_total` | The write requests to this share via RDMA direct placement | TBD | `server`, `share`|
`windows_smbclient_write_requests_total` | The write requests on this share | counter | `server`, `share`|
`windows_smbclient_read_seconds_total` | Seconds waiting for read requests on this share | counter | `server`, `share`|
`windows_smbclient_write_seconds_total` | Seconds waiting for write requests on this share | counter | `server`, `share`|
## Useful queries
```
# Average request queue length (includes read and write).
irate(windows_smbclient_data_queue_seconds_total[5m])
# Request latency milliseconds (includes read and write).
irate(windows_smbclient_request_seconds_total[5m]) / irate(windows_smbclient_requests_total[5m]) * 1000
```
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
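As a starting point, here is a minimal sketch of an alerting rule built from the request counters above; the 0.1 s latency threshold and the 5m windows are assumptions, not recommendations:
```yaml
groups:
  - name: windows_smbclient
    rules:
      # Illustrative sketch: average SMB request latency per server/share over 5m.
      # The 0.1s threshold is an assumption; tune it for your environment.
      - alert: SmbClientHighRequestLatency
        expr: rate(windows_smbclient_request_seconds_total[5m]) / rate(windows_smbclient_requests_total[5m]) > 0.1
        for: 5m
        labels:
          severity: warning
```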

View File

@@ -2,12 +2,13 @@
The terminal_services collector exposes terminal services (Remote Desktop Services) performance metrics.
|||
-|-
Metric name prefix | `terminal_services`
Data source | Perflib/WMI
Classes | [`Win32_PerfRawData_LocalSessionManager_TerminalServices`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_localsessionmanager_terminalservices/), [`Win32_PerfRawData_TermService_TerminalServicesSession`](https://docs.microsoft.com/en-us/previous-versions/aa394344(v%3Dvs.85)), [`Win32_PerfRawData_RemoteDesktopConnectionBrokerPerformanceCounterProvider_RemoteDesktopConnectionBrokerCounterset`](https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-r2-and-2012/mt729067(v%3Dws.11))
Enabled by default? | No
| | |
|-------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| __Metric name prefix__ | `terminal_services` |
| __Data source__ | Perflib/WMI, Win32 |
| __Classes__ | [`Win32_PerfRawData_LocalSessionManager_TerminalServices`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_localsessionmanager_terminalservices/), [`Win32_PerfRawData_TermService_TerminalServicesSession`](https://docs.microsoft.com/en-us/previous-versions/aa394344(v%3Dvs.85)), [`Win32_PerfRawData_RemoteDesktopConnectionBrokerPerformanceCounterProvider_RemoteDesktopConnectionBrokerCounterset`](https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-r2-and-2012/mt729067(v%3Dws.11)) |
| __Win32 API__ | [WTSEnumerateSessionsEx](https://learn.microsoft.com/en-us/windows/win32/api/wtsapi32/nf-wtsapi32-wtsenumeratesessionsexw) |
| __Enabled by default?__ | No |
## Flags
@@ -15,34 +16,116 @@ None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_terminal_services_local_session_count` | Number of local Terminal Services sessions. | gauge | `session`
`windows_terminal_services_connection_broker_performance_total`* | The total number of connections handled by the Connection Brokers since the service started. | counter | `connection`
`windows_terminal_services_handles` | Total number of handles currently opened by this process. This number is the sum of the handles currently opened by each thread in this process. | gauge | `session_name`
`windows_terminal_services_page_fault_total` | Rate at which page faults occur in the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. The page may not be retrieved from disk if it is on the standby list and therefore already in main memory. The page also may not be retrieved if it is in use by another process which shares the page. | counter | `session_name`
`windows_terminal_services_page_file_bytes` | Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `session_name`
`windows_terminal_services_page_file_bytes_peak` | Maximum number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `session_name`
`windows_terminal_services_privileged_time_seconds_total` | total elapsed time that the threads of the process have spent executing code in privileged mode. | Counter | `session_name`
`windows_terminal_services_processor_time_seconds_total` | total elapsed time that all of the threads of this process used the processor to execute instructions. | Counter | `session_name`
`windows_terminal_services_user_time_seconds_total` | total elapsed time that this process's threads have spent executing code in user mode. Applications, environment subsystems, and integral subsystems execute in user mode. | Counter | `session_name`
`windows_terminal_services_pool_non_paged_bytes` | Number of bytes in the non-paged pool, an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. This property displays the last observed value only; it is not an average. | gauge | `session_name`
`windows_terminal_services_pool_paged_bytes` | Number of bytes in the paged pool, an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. This property displays the last observed value only; it is not an average. | gauge | `session_name`
`windows_terminal_services_private_bytes` | Current number of bytes this process has allocated that cannot be shared with other processes. | gauge | `session_name`
`windows_terminal_services_threads` | Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread. | gauge | `session_name`
`windows_terminal_services_virtual_bytes` | Current size, in bytes, of the virtual address space the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries. | gauge | `session_name`
`windows_terminal_services_virtual_bytes_peak` | Maximum number of bytes of virtual address space the process has used at any one time. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process might limit its ability to load libraries. | gauge | `session_name`
`windows_terminal_services_working_set_bytes` | Current number of bytes in the working set of this process. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `session_name`
`windows_terminal_services_working_set_bytes_peak` | Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `session_name`
| Name | Description | Type | Labels |
|------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-----------------|
| `windows_terminal_services_session_info`                          | Info about active WTS sessions. | gauge | `host`, `user`, `state` |
| `windows_terminal_services_local_session_count` | Number of local Terminal Services sessions. | gauge | `session` |
| `windows_terminal_services_connection_broker_performance_total`* | The total number of connections handled by the Connection Brokers since the service started. | counter | `connection` |
| `windows_terminal_services_handles` | Total number of handles currently opened by this process. This number is the sum of the handles currently opened by each thread in this process. | gauge | `session_name` |
| `windows_terminal_services_page_fault_total` | Rate at which page faults occur in the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. The page may not be retrieved from disk if it is on the standby list and therefore already in main memory. The page also may not be retrieved if it is in use by another process which shares the page. | counter | `session_name` |
| `windows_terminal_services_page_file_bytes` | Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `session_name` |
| `windows_terminal_services_page_file_bytes_peak` | Maximum number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `session_name` |
| `windows_terminal_services_privileged_time_seconds_total`         | Total elapsed time that the threads of the process have spent executing code in privileged mode. | counter | `session_name`  |
| `windows_terminal_services_processor_time_seconds_total`          | Total elapsed time that all of the threads of this process used the processor to execute instructions. | counter | `session_name`  |
| `windows_terminal_services_user_time_seconds_total`               | Total elapsed time that this process's threads have spent executing code in user mode. Applications, environment subsystems, and integral subsystems execute in user mode. | counter | `session_name`  |
| `windows_terminal_services_pool_non_paged_bytes` | Number of bytes in the non-paged pool, an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. This property displays the last observed value only; it is not an average. | gauge | `session_name` |
| `windows_terminal_services_pool_paged_bytes` | Number of bytes in the paged pool, an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. This property displays the last observed value only; it is not an average. | gauge | `session_name` |
| `windows_terminal_services_private_bytes` | Current number of bytes this process has allocated that cannot be shared with other processes. | gauge | `session_name` |
| `windows_terminal_services_threads` | Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread. | gauge | `session_name` |
| `windows_terminal_services_virtual_bytes` | Current size, in bytes, of the virtual address space the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries. | gauge | `session_name` |
| `windows_terminal_services_virtual_bytes_peak` | Maximum number of bytes of virtual address space the process has used at any one time. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process might limit its ability to load libraries. | gauge | `session_name` |
| `windows_terminal_services_working_set_bytes` | Current number of bytes in the working set of this process. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `session_name` |
| `windows_terminal_services_working_set_bytes_peak` | Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `session_name` |
`windows_terminal_services_connection_broker_performance_total`* is only collected if the server has the `Remote Desktop Connection Broker` role.
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
```
windows_remote_fx_net_udp_packets_sent_total{session_name="RDP-Tcp 0"} 0
# HELP windows_terminal_services_cpu_time_seconds_total Total elapsed time that this process's threads have spent executing code.
# TYPE windows_terminal_services_cpu_time_seconds_total counter
windows_terminal_services_cpu_time_seconds_total{mode="RDP-Tcp 0",session_name="privileged"} 98.4843739
windows_terminal_services_cpu_time_seconds_total{mode="RDP-Tcp 0",session_name="processor"} 620.4687488999999
windows_terminal_services_cpu_time_seconds_total{mode="RDP-Tcp 0",session_name="user"} 521.9843741
# HELP windows_terminal_services_handles Total number of handles currently opened by this process. This number is the sum of the handles currently opened by each thread in this process.
# TYPE windows_terminal_services_handles gauge
windows_terminal_services_handles{session_name="RDP-Tcp 0"} 20999
# HELP windows_terminal_services_page_fault_total Rate at which page faults occur in the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. The page may not be retrieved from disk if it is on the standby list and therefore already in main memory. The page also may not be retrieved if it is in use by another process which shares the page.
# TYPE windows_terminal_services_page_fault_total counter
windows_terminal_services_page_fault_total{session_name="RDP-Tcp 0"} 1.0436271e+07
# HELP windows_terminal_services_page_file_bytes Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.
# TYPE windows_terminal_services_page_file_bytes gauge
windows_terminal_services_page_file_bytes{session_name="RDP-Tcp 0"} 4.310188032e+09
# HELP windows_terminal_services_page_file_bytes_peak Maximum number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.
# TYPE windows_terminal_services_page_file_bytes_peak gauge
windows_terminal_services_page_file_bytes_peak{session_name="RDP-Tcp 0"} 4.817412096e+09
# HELP windows_terminal_services_pool_non_paged_bytes Number of bytes in the non-paged pool, an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. This property displays the last observed value only; it is not an average.
# TYPE windows_terminal_services_pool_non_paged_bytes gauge
windows_terminal_services_pool_non_paged_bytes{session_name="RDP-Tcp 0"} 1.325456e+06
# HELP windows_terminal_services_pool_paged_bytes Number of bytes in the paged pool, an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. This property displays the last observed value only; it is not an average.
# TYPE windows_terminal_services_pool_paged_bytes gauge
windows_terminal_services_pool_paged_bytes{session_name="RDP-Tcp 0"} 2.4651264e+07
# HELP windows_terminal_services_private_bytes Current number of bytes this process has allocated that cannot be shared with other processes.
# TYPE windows_terminal_services_private_bytes gauge
windows_terminal_services_private_bytes{session_name="RDP-Tcp 0"} 4.310188032e+09
# HELP windows_terminal_services_session_info Terminal Services sessions info
# TYPE windows_terminal_services_session_info gauge
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="active",user="domain\\user"} 1
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="connect_query",user="domain\\user"} 0
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="connected",user="domain\\user"} 0
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="disconnected",user="domain\\user"} 0
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="down",user="domain\\user"} 0
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="idle",user="domain\\user"} 0
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="init",user="domain\\user"} 0
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="listen",user="domain\\user"} 0
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="reset",user="domain\\user"} 0
windows_terminal_services_session_info{host="",session_name="RDP-Tcp 0",state="shadow",user="domain\\user"} 0
windows_terminal_services_session_info{host="",session_name="console",state="active",user=""} 0
windows_terminal_services_session_info{host="",session_name="console",state="connect_query",user=""} 0
windows_terminal_services_session_info{host="",session_name="console",state="connected",user=""} 1
windows_terminal_services_session_info{host="",session_name="console",state="disconnected",user=""} 0
windows_terminal_services_session_info{host="",session_name="console",state="down",user=""} 0
windows_terminal_services_session_info{host="",session_name="console",state="idle",user=""} 0
windows_terminal_services_session_info{host="",session_name="console",state="init",user=""} 0
windows_terminal_services_session_info{host="",session_name="console",state="listen",user=""} 0
windows_terminal_services_session_info{host="",session_name="console",state="reset",user=""} 0
windows_terminal_services_session_info{host="",session_name="console",state="shadow",user=""} 0
windows_terminal_services_session_info{host="",session_name="services",state="active",user=""} 0
windows_terminal_services_session_info{host="",session_name="services",state="connect_query",user=""} 0
windows_terminal_services_session_info{host="",session_name="services",state="connected",user=""} 0
windows_terminal_services_session_info{host="",session_name="services",state="disconnected",user=""} 1
windows_terminal_services_session_info{host="",session_name="services",state="down",user=""} 0
windows_terminal_services_session_info{host="",session_name="services",state="idle",user=""} 0
windows_terminal_services_session_info{host="",session_name="services",state="init",user=""} 0
windows_terminal_services_session_info{host="",session_name="services",state="listen",user=""} 0
windows_terminal_services_session_info{host="",session_name="services",state="reset",user=""} 0
windows_terminal_services_session_info{host="",session_name="services",state="shadow",user=""} 0
# HELP windows_terminal_services_threads Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.
# TYPE windows_terminal_services_threads gauge
windows_terminal_services_threads{session_name="RDP-Tcp 0"} 676
# HELP windows_terminal_services_virtual_bytes Current size, in bytes, of the virtual address space the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries.
# TYPE windows_terminal_services_virtual_bytes gauge
windows_terminal_services_virtual_bytes{session_name="RDP-Tcp 0"} 9.3228347629568e+13
# HELP windows_terminal_services_virtual_bytes_peak Maximum number of bytes of virtual address space the process has used at any one time. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process might limit its ability to load libraries.
# TYPE windows_terminal_services_virtual_bytes_peak gauge
windows_terminal_services_virtual_bytes_peak{session_name="RDP-Tcp 0"} 9.323192164352e+13
# HELP windows_terminal_services_working_set_bytes Current number of bytes in the working set of this process. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.
# TYPE windows_terminal_services_working_set_bytes gauge
windows_terminal_services_working_set_bytes{session_name="RDP-Tcp 0"} 6.0632064e+09
# HELP windows_terminal_services_working_set_bytes_peak Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.
# TYPE windows_terminal_services_working_set_bytes_peak gauge
windows_terminal_services_working_set_bytes_peak{session_name="RDP-Tcp 0"} 6.74854912e+09
```
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
These metrics can be combined with other metrics to create useful queries. For example, with remote_fx metrics:
```
windows_remote_fx_net_loss_rate * on(session_name) group_left(user) (windows_terminal_services_session_info == 1)
```
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
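As a starting point, here is a minimal sketch that alerts on the number of active sessions reported by `windows_terminal_services_session_info`; the threshold of 20 sessions is an assumption:
```yaml
groups:
  - name: windows_terminal_services
    rules:
      # Illustrative sketch: count sessions currently in the "active" state.
      # The threshold of 20 is an assumption; adjust it to your environment.
      - alert: TooManyActiveTerminalSessions
        expr: count(windows_terminal_services_session_info{state="active"} == 1) > 20
        for: 10m
        labels:
          severity: warning
```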

View File

@@ -31,8 +31,12 @@ import (
"github.com/prometheus/common/version"
"github.com/prometheus/exporter-toolkit/web"
webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
"golang.org/x/sys/windows"
)
// https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
const PROCESS_ALL_ACCESS = windows.STANDARD_RIGHTS_REQUIRED | windows.SYNCHRONIZE | windows.SPECIFIC_RIGHTS_ALL
// Same struct prometheus uses for their /version endpoint.
// Separate copy to avoid pulling all of prometheus as a dependency
type prometheusVersion struct {
@@ -44,6 +48,32 @@ type prometheusVersion struct {
GoVersion string `json:"goVersion"`
}
// Mapping of priority names to uint32 values required by windows.SetPriorityClass
var priorityStringToInt = map[string]uint32{
"realtime": windows.REALTIME_PRIORITY_CLASS,
"high": windows.HIGH_PRIORITY_CLASS,
"abovenormal": windows.ABOVE_NORMAL_PRIORITY_CLASS,
"normal": windows.NORMAL_PRIORITY_CLASS,
"belownormal": windows.BELOW_NORMAL_PRIORITY_CLASS,
"low": windows.IDLE_PRIORITY_CLASS,
}
func setPriorityWindows(pid int, priority uint32) error {
handle, err := windows.OpenProcess(PROCESS_ALL_ACCESS, false, uint32(pid))
if err != nil {
return err
}
//nolint:errcheck
defer windows.CloseHandle(handle) // Technically this can fail, but we ignore it
err = windows.SetPriorityClass(handle, priority)
if err != nil {
return err
}
return nil
}
func main() {
app := kingpin.New("windows_exporter", "A metrics collector for Windows.")
var (
@@ -84,6 +114,10 @@ func main() {
"debug.enabled",
"If true, windows_exporter will expose debug endpoints under /debug/pprof.",
).Default("false").Bool()
processPriority = app.Flag(
"process.priority",
"Priority of the exporter process. Higher priorities may improve exporter responsiveness during periods of system load. Can be one of [\"realtime\", \"high\", \"abovenormal\", \"normal\", \"belownormal\", \"low\"]",
).Default("normal").String()
)
winlogConfig := &winlog.Config{}
@@ -145,6 +179,16 @@ func main() {
return
}
// Only set process priority if a non-default and valid value has been set
if *processPriority != "normal" && priorityStringToInt[*processPriority] != 0 {
_ = level.Debug(logger).Log("msg", "setting process priority to "+*processPriority)
err = setPriorityWindows(os.Getpid(), priorityStringToInt[*processPriority])
if err != nil {
_ = level.Error(logger).Log("msg", "failed to set process priority", "err", err)
os.Exit(1)
}
}
if err = wmi.InitWbem(logger); err != nil {
_ = level.Error(logger).Log("err", err)
os.Exit(1)
@@ -180,14 +224,14 @@ func main() {
mux := http.NewServeMux()
mux.HandleFunc(*metricsPath, withConcurrencyLimit(*maxRequests, collectors.BuildServeHTTP(*disableExporterMetrics, *timeoutMargin)))
mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
mux.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "application/json")
_, err := fmt.Fprintln(w, `{"status":"ok"}`)
if err != nil {
_ = level.Debug(logger).Log("Failed to write to stream", "err", err)
_ = level.Debug(logger).Log("msg", "Failed to write to stream", "err", err)
}
})
mux.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
mux.HandleFunc("/version", func(w http.ResponseWriter, _ *http.Request) {
// we can't use "version" directly as it is a package, and not an object that
// can be serialized.
err := json.NewEncoder(w).Encode(prometheusVersion{
@@ -211,34 +255,6 @@ func main() {
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
}
if *metricsPath != "/" && *metricsPath != "" {
landingConfig := web.LandingConfig{
Name: "Windows Exporter",
Description: "Prometheus Exporter for Windows servers",
Version: version.Info(),
Links: []web.LandingLinks{
{
Address: *metricsPath,
Text: "Metrics",
},
{
Address: "/health",
Text: "Health Check",
},
{
Address: "/version",
Text: "Version Info",
},
},
}
landingPage, err := web.NewLandingPage(landingConfig)
if err != nil {
_ = level.Error(logger).Log("msg", "failed to generate landing page", "err", err)
os.Exit(1)
}
mux.Handle("/", landingPage)
}
_ = level.Info(logger).Log("msg", "Starting windows_exporter", "version", version.Info())
_ = level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext())
_ = level.Debug(logger).Log("msg", "Go MAXPROCS", "procs", runtime.GOMAXPROCS(0))

go.mod
View File

@@ -1,23 +1,23 @@
module github.com/prometheus-community/windows_exporter
go 1.21
go 1.22
require (
github.com/Microsoft/hcsshim v0.12.3
github.com/Microsoft/hcsshim v0.12.5
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/dimchansky/utfbom v1.1.1
github.com/go-kit/log v0.2.1
github.com/go-ole/go-ole v1.3.0
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.53.0
github.com/prometheus/common v0.55.0
github.com/prometheus/exporter-toolkit v0.11.0
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/stretchr/testify v1.9.0
github.com/yusufpapurcu/wmi v1.2.4
go.opencensus.io v0.24.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
golang.org/x/sys v0.20.0
golang.org/x/sys v0.22.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -32,21 +32,21 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/grpc v1.62.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

go.sum
View File

@@ -2,8 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0=
github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ=
github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0=
github.com/Microsoft/hcsshim v0.12.5/go.mod h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
@@ -52,17 +52,14 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -72,23 +69,25 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g=
github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
@@ -105,77 +104,57 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
@@ -197,10 +176,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -10,6 +10,9 @@ Param (
)
$ErrorActionPreference = "Stop"
# The MSI version is not semver compliant, so just take the numerical parts
$MsiVersion = $Version -replace '^v?([0-9\.]+).*$','$1'
# Get absolute path to executable before switching directories
$PathToExecutable = Resolve-Path $PathToExecutable
# Set working dir to this directory, reset previous on exit
@@ -25,7 +28,7 @@ Copy-Item -Force $PathToExecutable Work/windows_exporter.exe
Write-Verbose "Creating windows_exporter-${Version}-${Arch}.msi"
$wixArch = @{"amd64" = "x64"; "arm64" = "arm64"}[$Arch]
Invoke-Expression "wix build -arch $wixArch -o .\windows_exporter-$($Version)-$($Arch).msi .\windows_exporter.wxs -d Version=$($Version) -ext WixToolset.Firewall.wixext -ext WixToolset.Util.wixext"
Invoke-Expression "wix build -arch $wixArch -o .\windows_exporter-$($Version)-$($Arch).msi .\windows_exporter.wxs -d Version=$($MsiVersion) -ext WixToolset.Firewall.wixext -ext WixToolset.Util.wixext"
Write-Verbose "Done!"
Pop-Location

View File

@@ -5,7 +5,9 @@
<?define PlatformProgramFiles = "ProgramFilesFolder" ?>
<?endif?>
<Package UpgradeCode="66a6eb5b-1fc2-4b14-a362-5ceec6413308" Name="windows_exporter" Version="$(var.Version)" Manufacturer="prometheus-community" Language="1033" Codepage="1252"><SummaryInformation Manufacturer="prometheus-community" Description="windows_exporter $(var.Version) installer" />
<Package UpgradeCode="66a6eb5b-1fc2-4b14-a362-5ceec6413308" Name="windows_exporter" Version="$(var.Version)" Manufacturer="prometheus-community" Language="1033" Codepage="1252">
<SummaryInformation Manufacturer="prometheus-community" Description="windows_exporter $(var.Version) installer" />
<Media Id="1" Cabinet="windows_exporter.cab" EmbedCab="yes" />
<MajorUpgrade Schedule="afterInstallInitialize" DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit." />
@@ -16,7 +18,8 @@
<SetProperty Id="ExtraFlags" After="InstallFiles" Sequence="execute" Value="[EXTRA_FLAGS]" Condition="EXTRA_FLAGS" />
<Property Id="ADD_FIREWALL_EXCEPTION" Secure="yes" />
<SetProperty Id="FirewallException" After="InstallFiles" Sequence="execute" Value="[ADD_FIREWALL_EXCEPTION]" Condition="ADD_FIREWALL_EXCEPTION" />
<Property Id="ENABLE_V1_PERFORMANCE_COUNTERS" Secure="yes" Value="yes"/>
<Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
<SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--web.listen-address [LISTEN_ADDR]:[LISTEN_PORT]" Condition="LISTEN_ADDR&lt;&gt;&quot;&quot; OR LISTEN_PORT&lt;&gt;9182" />
@@ -47,6 +50,25 @@
<Custom Action="RemoveEventSource" After="InstallInitialize" />
</InstallExecuteSequence>
<SetProperty
Id="EnableV1PerformanceCounters"
Value="&quot;[%ComSpec]&quot; /c lodctr.exe /E:Lsa &amp; lodctr.exe /E:PerfProc &amp; lodctr.exe /R"
Before="EnableV1PerformanceCounters"
Sequence="execute"
/>
<CustomAction
Id="EnableV1PerformanceCounters"
BinaryRef="Wix4UtilCA_$(sys.BUILDARCHSHORT)"
DllEntry="WixSilentExec"
Execute="deferred"
Impersonate="no"
Return="check"
/>
<InstallExecuteSequence>
<Custom Action="EnableV1PerformanceCounters" Before="InstallFinalize" Condition="ENABLE_V1_PERFORMANCE_COUNTERS=&quot;yes&quot;"/>
</InstallExecuteSequence>
<Property Id="TEXTFILE_DIRS" Secure="yes" />
<SetProperty Id="TextfileDirsFlag" After="InstallFiles" Sequence="execute" Value="--collector.textfile.directories [TEXTFILE_DIRS]" Condition="TEXTFILE_DIRS" />

View File

@@ -490,8 +490,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting ad metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting ad metrics", "err", err)
return err
}
return nil
@@ -649,14 +649,14 @@ type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
TransitivesuboperationsPersec uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_DirectoryServices_DirectoryServices
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -1350,5 +1350,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
float64(dst[0].TombstonesVisitedPersec),
)
return nil, nil
return nil
}

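The same signature change repeats across the collectors below: the *prometheus.Desc return value was always nil and only fed the now-dropped "desc" log field, so the inner collect functions return a bare error. A minimal, self-contained sketch of the resulting shape (toy types, not the exporter's real ones):

package main

import (
	"errors"
	"log"
)

type collector struct{}

// collect previously returned (*prometheus.Desc, error) with the Desc always
// nil; it now returns only the error.
func (c *collector) collect() error {
	return errors.New("WMI query returned empty result set")
}

// Collect logs and propagates any failure from the inner collect.
func (c *collector) Collect() error {
	if err := c.collect(); err != nil {
		log.Println("msg", "failed collecting ad metrics", "err", err)
		return err
	}
	return nil
}

func main() {
	_ = (&collector{}).Collect()
}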
View File

@@ -145,8 +145,8 @@ func (c *collector) Build() error {
}
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectADCSCounters(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting ADCS metrics", "desc", desc, "err", err)
if err := c.collectADCSCounters(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting ADCS metrics", "err", err)
return err
}
return nil
@@ -169,17 +169,17 @@ type perflibADCS struct {
SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"`
}
func (c *collector) collectADCSCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectADCSCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibADCS, 0)
if _, ok := ctx.PerfObjects["Certification Authority"]; !ok {
return nil, errors.New("perflib did not contain an entry for Certification Authority")
return errors.New("perflib did not contain an entry for Certification Authority")
}
err := perflib.UnmarshalObject(ctx.PerfObjects["Certification Authority"], &dst, c.logger)
if err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("perflib query for Certification Authority (ADCS) returned empty result set")
return errors.New("perflib query for Certification Authority (ADCS) returned empty result set")
}
for _, d := range dst {
@@ -267,5 +267,5 @@ func (c *collector) collectADCSCounters(ctx *types.ScrapeContext, ch chan<- prom
)
}
return nil, nil
return nil
}

View File

@@ -254,8 +254,8 @@ func (c *collector) Build() error {
// Collect implements the Collector interface
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cache metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cache metrics", "err", err)
return err
}
return nil
@@ -295,10 +295,10 @@ type perflibCache struct {
SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []perflibCache // Single-instance class, array is required but will have single entry.
if err := perflib.UnmarshalObject(ctx.PerfObjects["Cache"], &dst, c.logger); err != nil {
return nil, err
return err
}
ch <- prometheus.MustNewConstMetric(
@@ -475,5 +475,5 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
dst[0].SyncPinReadsTotal,
)
return nil, nil
return nil
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/collector/ad"
"github.com/prometheus-community/windows_exporter/pkg/collector/adcs"
"github.com/prometheus-community/windows_exporter/pkg/collector/adfs"
@@ -23,6 +24,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/exchange"
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv"
"github.com/prometheus-community/windows_exporter/pkg/collector/iis"
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
"github.com/prometheus-community/windows_exporter/pkg/collector/logon"
"github.com/prometheus-community/windows_exporter/pkg/collector/memory"
@@ -45,11 +47,13 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/nps"
"github.com/prometheus-community/windows_exporter/pkg/collector/os"
"github.com/prometheus-community/windows_exporter/pkg/collector/physical_disk"
"github.com/prometheus-community/windows_exporter/pkg/collector/printer"
"github.com/prometheus-community/windows_exporter/pkg/collector/process"
"github.com/prometheus-community/windows_exporter/pkg/collector/remote_fx"
"github.com/prometheus-community/windows_exporter/pkg/collector/scheduled_task"
"github.com/prometheus-community/windows_exporter/pkg/collector/service"
"github.com/prometheus-community/windows_exporter/pkg/collector/smb"
"github.com/prometheus-community/windows_exporter/pkg/collector/smbclient"
"github.com/prometheus-community/windows_exporter/pkg/collector/smtp"
"github.com/prometheus-community/windows_exporter/pkg/collector/system"
"github.com/prometheus-community/windows_exporter/pkg/collector/tcp"
@@ -83,6 +87,8 @@ func NewWithFlags(app *kingpin.Application) Collectors {
}
// NewWithConfig To be called by the external libraries for collector initialization without running kingpin.Parse
//
//goland:noinspection GoUnusedExportedFunction
func NewWithConfig(logger log.Logger, config Config) Collectors {
collectors := map[string]types.Collector{}
collectors[ad.Name] = ad.New(logger, &config.Ad)
@@ -101,6 +107,7 @@ func NewWithConfig(logger log.Logger, config Config) Collectors {
collectors[exchange.Name] = exchange.New(logger, &config.Fsrmquota)
collectors[hyperv.Name] = hyperv.New(logger, &config.Hyperv)
collectors[iis.Name] = iis.New(logger, &config.Iis)
collectors[license.Name] = license.New(logger, &config.License)
collectors[logical_disk.Name] = logical_disk.New(logger, &config.LogicalDisk)
collectors[logon.Name] = logon.New(logger, &config.Logon)
collectors[memory.Name] = memory.New(logger, &config.Memory)
@@ -123,11 +130,13 @@ func NewWithConfig(logger log.Logger, config Config) Collectors {
collectors[nps.Name] = nps.New(logger, &config.Nps)
collectors[os.Name] = os.New(logger, &config.Os)
collectors[physical_disk.Name] = physical_disk.New(logger, &config.PhysicalDisk)
collectors[printer.Name] = printer.New(logger, &config.Printer)
collectors[process.Name] = process.New(logger, &config.Process)
collectors[remote_fx.Name] = remote_fx.New(logger, &config.RemoteFx)
collectors[scheduled_task.Name] = scheduled_task.New(logger, &config.ScheduledTask)
collectors[service.Name] = service.New(logger, &config.Service)
collectors[smb.Name] = smb.New(logger, &config.Smb)
collectors[smbclient.Name] = smbclient.New(logger, &config.SmbClient)
collectors[smtp.Name] = smtp.New(logger, &config.Smtp)
collectors[system.Name] = system.New(logger, &config.System)
collectors[teradici_pcoip.Name] = teradici_pcoip.New(logger, &config.TeradiciPcoip)
@@ -161,11 +170,12 @@ func (c *Collectors) SetPerfCounterQuery() error {
var (
err error
perfCounterDependencies []string
perfCounterNames []string
perfIndicies []string
perfCounterNames []string
perfIndicies []string
)
perfCounterDependencies := make([]string, 0, len(c.collectors))
for _, collector := range c.collectors {
perfCounterNames, err = collector.GetPerfCounter()
if err != nil {

View File

@@ -16,6 +16,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/exchange"
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv"
"github.com/prometheus-community/windows_exporter/pkg/collector/iis"
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
"github.com/prometheus-community/windows_exporter/pkg/collector/logon"
"github.com/prometheus-community/windows_exporter/pkg/collector/memory"
@@ -38,11 +39,13 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/nps"
"github.com/prometheus-community/windows_exporter/pkg/collector/os"
"github.com/prometheus-community/windows_exporter/pkg/collector/physical_disk"
"github.com/prometheus-community/windows_exporter/pkg/collector/printer"
"github.com/prometheus-community/windows_exporter/pkg/collector/process"
"github.com/prometheus-community/windows_exporter/pkg/collector/remote_fx"
"github.com/prometheus-community/windows_exporter/pkg/collector/scheduled_task"
"github.com/prometheus-community/windows_exporter/pkg/collector/service"
"github.com/prometheus-community/windows_exporter/pkg/collector/smb"
"github.com/prometheus-community/windows_exporter/pkg/collector/smbclient"
"github.com/prometheus-community/windows_exporter/pkg/collector/smtp"
"github.com/prometheus-community/windows_exporter/pkg/collector/system"
"github.com/prometheus-community/windows_exporter/pkg/collector/tcp"
@@ -72,6 +75,7 @@ type Config struct {
Fsrmquota exchange.Config `yaml:"fsrmquota"`
Hyperv hyperv.Config `yaml:"hyperv"`
Iis iis.Config `yaml:"iis"`
License license.Config `yaml:"license"`
LogicalDisk logical_disk.Config `yaml:"logical_disk"`
Logon logon.Config `yaml:"logon"`
Memory memory.Config `yaml:"memory"`
@@ -94,11 +98,13 @@ type Config struct {
Nps nps.Config `yaml:"nps"`
Os os.Config `yaml:"os"`
PhysicalDisk physical_disk.Config `yaml:"physical_disk"`
Printer printer.Config `yaml:"printer"`
Process process.Config `yaml:"process"`
RemoteFx remote_fx.Config `yaml:"remote_fx"`
ScheduledTask scheduled_task.Config `yaml:"scheduled_task"`
Service service.Config `yaml:"service"`
Smb smb.Config `yaml:"smb"`
SmbClient smbclient.Config `yaml:"smbclient"`
Smtp smtp.Config `yaml:"smtp"`
System system.Config `yaml:"system"`
TeradiciPcoip teradici_pcoip.Config `yaml:"teradici_pcoip"`
@@ -112,6 +118,8 @@ type Config struct {
}
// ConfigDefaults is to be used by the external libraries. It holds all ConfigDefaults from all collectors
//
//goland:noinspection GoUnusedGlobalVariable
var ConfigDefaults = Config{
Ad: ad.ConfigDefaults,
Adcs: adcs.ConfigDefaults,
@@ -129,6 +137,7 @@ var ConfigDefaults = Config{
Fsrmquota: exchange.ConfigDefaults,
Hyperv: hyperv.ConfigDefaults,
Iis: iis.ConfigDefaults,
License: license.ConfigDefaults,
LogicalDisk: logical_disk.ConfigDefaults,
Logon: logon.ConfigDefaults,
Memory: memory.ConfigDefaults,
@@ -151,11 +160,13 @@ var ConfigDefaults = Config{
Nps: nps.ConfigDefaults,
Os: os.ConfigDefaults,
PhysicalDisk: physical_disk.ConfigDefaults,
Printer: printer.ConfigDefaults,
Process: process.ConfigDefaults,
RemoteFx: remote_fx.ConfigDefaults,
ScheduledTask: scheduled_task.ConfigDefaults,
Service: service.ConfigDefaults,
Smb: smb.ConfigDefaults,
SmbClient: smbclient.ConfigDefaults,
Smtp: smtp.ConfigDefaults,
System: system.ConfigDefaults,
TeradiciPcoip: teradici_pcoip.ConfigDefaults,

View File

@@ -3,7 +3,6 @@
package container
import (
"fmt"
"strings"
"github.com/Microsoft/hcsshim"
@@ -193,8 +192,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting collector metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting collector metrics", "err", err)
return err
}
return nil
@@ -208,12 +207,12 @@ func (c *collector) containerClose(container hcsshim.Container) {
}
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
// Types Container is passed to get the containers compute systems only
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
if err != nil {
_ = level.Error(c.logger).Log("msg", "Err in Getting containers", "err", err)
return nil, err
return err
}
count := len(containers)
@@ -224,7 +223,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
float64(count),
)
if count == 0 {
return nil, nil
return nil
}
containerPrefixes := make(map[string]string)
@@ -322,18 +321,18 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
hnsEndpoints, err := hcsshim.HNSListEndpointRequest()
if err != nil {
_ = level.Warn(c.logger).Log("msg", "Failed to collect network stats for containers")
return nil, nil
return err
}
if len(hnsEndpoints) == 0 {
_ = level.Info(c.logger).Log("msg", fmt.Sprintf("No network stats for containers to collect"))
return nil, nil
_ = level.Info(c.logger).Log("msg", "No network stats for containers to collect")
return nil
}
for _, endpoint := range hnsEndpoints {
endpointStats, err := hcsshim.GetHNSEndpointStats(endpoint.Id)
if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Failed to collect network stats for interface %s", endpoint.Id), "err", err)
_ = level.Warn(c.logger).Log("msg", "Failed to collect network stats for interface "+endpoint.Id, "err", err)
continue
}
@@ -342,7 +341,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
endpointId := strings.ToUpper(endpoint.Id)
if !ok {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Failed to collect network stats for container %s", containerId))
_ = level.Warn(c.logger).Log("msg", "Failed to collect network stats for container "+containerId)
continue
}
@@ -386,7 +385,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
}
}
return nil, nil
return nil
}
func getContainerIdWithPrefix(containerDetails hcsshim.ContainerProperties) string {

View File

@@ -86,23 +86,23 @@ type win32_Processor struct {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cpu_info metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cpu_info metrics", "err", err)
return err
}
return nil
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []win32_Processor
// We use a static query here because the provided methods in wmi.go all issue a SELECT *;
// This results in the time-consuming LoadPercentage field being read which seems to measure each CPU
// serially over a 1 second interval, so the scrape time is at least 1s * num_sockets
if err := wmi.Query(win32ProcessorQuery, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
// Some CPUs end up exposing trailing spaces for certain strings, so clean them up
@@ -121,5 +121,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

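The static win32ProcessorQuery itself sits outside this hunk; the point of the comment is that listing columns explicitly keeps WMI from sampling LoadPercentage at all. A rough standalone sketch of that pattern, reusing the exporter's pkg/wmi wrapper and an illustrative column list rather than the real one:

//go:build windows

package main

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/pkg/wmi"
)

// Only the listed fields are fetched; LoadPercentage is deliberately absent,
// since WMI measures it over roughly a second per socket.
type win32Processor struct {
	Name          string
	NumberOfCores uint32
}

func main() {
	var dst []win32Processor
	// Illustrative column list; the exporter defines its own win32ProcessorQuery.
	if err := wmi.Query("SELECT Name, NumberOfCores FROM Win32_Processor", &dst); err != nil {
		fmt.Println("query failed:", err)
		return
	}
	for _, p := range dst {
		fmt.Println(p.Name, p.NumberOfCores)
	}
}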
View File

@@ -77,21 +77,21 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cs metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cs metrics", "err", err)
return err
}
return nil
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
// Get systeminfo for number of processors
systemInfo := sysinfoapi.GetSystemInfo()
// Get memory status for physical memory
mem, err := sysinfoapi.GlobalMemoryStatusEx()
if err != nil {
return nil, err
return err
}
ch <- prometheus.MustNewConstMetric(
@@ -108,15 +108,15 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
hostname, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSHostname)
if err != nil {
return nil, err
return err
}
domain, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSDomain)
if err != nil {
return nil, err
return err
}
fqdn, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSFullyQualified)
if err != nil {
return nil, err
return err
}
ch <- prometheus.MustNewConstMetric(
@@ -128,5 +128,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
fqdn,
)
return nil, nil
return nil
}

View File

@@ -22,7 +22,7 @@ type Config struct {
}
var ConfigDefaults = Config{
DfsrEnabledCollectors: "",
DfsrEnabledCollectors: "connection,folder,volume",
}
// collector contains the metric and state data of the DFSR collectors.
@@ -130,8 +130,9 @@ func (c *collector) SetLogger(logger log.Logger) {
func (c *collector) GetPerfCounter() ([]string, error) {
// Perflib sources are dynamic, depending on the enabled child collectors
var perflibDependencies []string
for _, source := range utils.ExpandEnabledChildCollectors(*c.dfsrEnabledCollectors) {
expandedChildCollectors := utils.ExpandEnabledChildCollectors(*c.dfsrEnabledCollectors)
perflibDependencies := make([]string, 0, len(expandedChildCollectors))
for _, source := range expandedChildCollectors {
perflibDependencies = append(perflibDependencies, dfsrGetPerfObjectName(source))
}

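ExpandEnabledChildCollectors is not shown here; it presumably just splits (and de-duplicates) the comma-separated flag value, so the new default enables the connection, folder and volume child collectors out of the box. A small sketch of the preallocation pattern the hunk switches to, with a stand-in expand helper and placeholder perflib object names:

package main

import (
	"fmt"
	"strings"
)

// expand is a stand-in for utils.ExpandEnabledChildCollectors.
func expand(enabled string) []string {
	if enabled == "" {
		return nil
	}
	return strings.Split(enabled, ",")
}

func main() {
	expanded := expand("connection,folder,volume")
	// Size the slice once instead of growing a nil slice in the loop.
	deps := make([]string, 0, len(expanded))
	for _, source := range expanded {
		deps = append(deps, "DFS Replication "+source) // placeholder, not the real object names
	}
	fmt.Println(deps)
}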
View File

@@ -150,21 +150,21 @@ var (
// Collect sends the metric values for each metric to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting disk_drive_info metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting disk_drive_info metrics", "err", err)
return err
}
return nil
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_DiskDrive
if err := wmi.Query(win32DiskQuery, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
for _, disk := range dst {
@@ -222,5 +222,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
}
}
return nil, nil
return nil
}

View File

@@ -208,8 +208,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting dns metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting dns metrics", "err", err)
return err
}
return nil
@@ -261,14 +261,14 @@ type Win32_PerfRawData_DNS_DNS struct {
ZoneTransferSOARequestSent uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_DNS_DNS
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -520,5 +520,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
float64(dst[0].SecureUpdateReceived),
)
return nil, nil
return nil
}

View File

@@ -118,8 +118,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting fsrmquota metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting fsrmquota metrics", "err", err)
return err
}
return nil
@@ -142,14 +142,14 @@ type MSFT_FSRMQuota struct {
SoftLimit bool
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []MSFT_FSRMQuota
q := wmi.QueryAll(&dst, c.logger)
var count int
if err := wmi.QueryNamespace(q, &dst, "root/microsoft/windows/fsrm"); err != nil {
return nil, err
return err
}
for _, quota := range dst {
@@ -214,5 +214,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
prometheus.GaugeValue,
float64(count),
)
return nil, nil
return nil
}

View File

@@ -743,63 +743,63 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectVmHealth(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV health status metrics", "desc", desc, "err", err)
if err := c.collectVmHealth(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV health status metrics", "err", err)
return err
}
if desc, err := c.collectVmVid(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV pages metrics", "desc", desc, "err", err)
if err := c.collectVmVid(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV pages metrics", "err", err)
return err
}
if desc, err := c.collectVmHv(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV hv status metrics", "desc", desc, "err", err)
if err := c.collectVmHv(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV hv status metrics", "err", err)
return err
}
if desc, err := c.collectVmProcessor(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV processor metrics", "desc", desc, "err", err)
if err := c.collectVmProcessor(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV processor metrics", "err", err)
return err
}
if desc, err := c.collectHostLPUsage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host logical processors metrics", "desc", desc, "err", err)
if err := c.collectHostLPUsage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host logical processors metrics", "err", err)
return err
}
if desc, err := c.collectHostCpuUsage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host CPU metrics", "desc", desc, "err", err)
if err := c.collectHostCpuUsage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host CPU metrics", "err", err)
return err
}
if desc, err := c.collectVmCpuUsage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV VM CPU metrics", "desc", desc, "err", err)
if err := c.collectVmCpuUsage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV VM CPU metrics", "err", err)
return err
}
if desc, err := c.collectVmSwitch(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV switch metrics", "desc", desc, "err", err)
if err := c.collectVmSwitch(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV switch metrics", "err", err)
return err
}
if desc, err := c.collectVmEthernet(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV ethernet metrics", "desc", desc, "err", err)
if err := c.collectVmEthernet(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV ethernet metrics", "err", err)
return err
}
if desc, err := c.collectVmStorage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual storage metrics", "desc", desc, "err", err)
if err := c.collectVmStorage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual storage metrics", "err", err)
return err
}
if desc, err := c.collectVmNetwork(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual network metrics", "desc", desc, "err", err)
if err := c.collectVmNetwork(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual network metrics", "err", err)
return err
}
if desc, err := c.collectVmMemory(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual memory metrics", "desc", desc, "err", err)
if err := c.collectVmMemory(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual memory metrics", "err", err)
return err
}
@@ -812,11 +812,11 @@ type Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
HealthOk uint32
}
func (c *collector) collectVmHealth(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmHealth(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, health := range dst {
@@ -834,7 +834,7 @@ func (c *collector) collectVmHealth(ch chan<- prometheus.Metric) (*prometheus.De
}
return nil, nil
return nil
}
// Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition ..,
@@ -845,11 +845,11 @@ type Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition struct {
RemotePhysicalPages uint64
}
func (c *collector) collectVmVid(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmVid(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, page := range dst {
@@ -880,7 +880,7 @@ func (c *collector) collectVmVid(ch chan<- prometheus.Metric) (*prometheus.Desc,
}
return nil, nil
return nil
}
// Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition ...
@@ -909,11 +909,11 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition struct {
VirtualTLBPages uint64
}
func (c *collector) collectVmHv(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmHv(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1038,7 +1038,7 @@ func (c *collector) collectVmHv(ch chan<- prometheus.Metric) (*prometheus.Desc,
}
return nil, nil
return nil
}
// Win32_PerfRawData_HvStats_HyperVHypervisor ...
@@ -1047,11 +1047,11 @@ type Win32_PerfRawData_HvStats_HyperVHypervisor struct {
VirtualProcessors uint64
}
func (c *collector) collectVmProcessor(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmProcessor(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisor
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1070,7 +1070,7 @@ func (c *collector) collectVmProcessor(ch chan<- prometheus.Metric) (*prometheus
}
return nil, nil
return nil
}
// Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor ...
@@ -1081,11 +1081,11 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor struct {
PercentTotalRunTime uint
}
func (c *collector) collectHostLPUsage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectHostLPUsage(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1123,7 +1123,7 @@ func (c *collector) collectHostLPUsage(ch chan<- prometheus.Metric) (*prometheus
}
return nil, nil
return nil
}
// Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor ...
@@ -1136,11 +1136,11 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor struct {
CPUWaitTimePerDispatch uint64
}
func (c *collector) collectHostCpuUsage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectHostCpuUsage(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1191,7 +1191,7 @@ func (c *collector) collectHostCpuUsage(ch chan<- prometheus.Metric) (*prometheu
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor ...
@@ -1204,11 +1204,11 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor struct {
CPUWaitTimePerDispatch uint64
}
func (c *collector) collectVmCpuUsage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmCpuUsage(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1266,7 +1266,7 @@ func (c *collector) collectVmCpuUsage(ch chan<- prometheus.Metric) (*prometheus.
}
return nil, nil
return nil
}
// Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch ...
@@ -1298,11 +1298,11 @@ type Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch struct {
PurgedMacAddressesPersec uint64
}
func (c *collector) collectVmSwitch(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmSwitch(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1449,7 +1449,7 @@ func (c *collector) collectVmSwitch(ch chan<- prometheus.Metric) (*prometheus.De
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter ...
@@ -1463,11 +1463,11 @@ type Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter struct {
FramesSentPersec uint64
}
func (c *collector) collectVmEthernet(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmEthernet(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1519,7 +1519,7 @@ func (c *collector) collectVmEthernet(ch chan<- prometheus.Metric) (*prometheus.
}
return nil, nil
return nil
}
// Win32_PerfRawData_Counters_HyperVVirtualStorageDevice ...
@@ -1533,11 +1533,11 @@ type Win32_PerfRawData_Counters_HyperVVirtualStorageDevice struct {
WriteOperationsPerSec uint64
}
func (c *collector) collectVmStorage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmStorage(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_Counters_HyperVVirtualStorageDevice
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1588,7 +1588,7 @@ func (c *collector) collectVmStorage(ch chan<- prometheus.Metric) (*prometheus.D
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter ...
@@ -1602,11 +1602,11 @@ type Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter struct {
PacketsSentPersec uint64
}
func (c *collector) collectVmNetwork(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmNetwork(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1657,7 +1657,7 @@ func (c *collector) collectVmNetwork(ch chan<- prometheus.Metric) (*prometheus.D
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM ...
@@ -1675,11 +1675,11 @@ type Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM struct {
RemovedMemory uint64
}
func (c *collector) collectVmMemory(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectVmMemory(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, obj := range dst {
@@ -1758,5 +1758,5 @@ func (c *collector) collectVmMemory(ch chan<- prometheus.Metric) (*prometheus.De
)
}
return nil, nil
return nil
}

View File

@@ -54,7 +54,7 @@ func getIISVersion(logger log.Logger) simple_version {
defer func() {
err = k.Close()
if err != nil {
_ = level.Warn(logger).Log("msg", fmt.Sprintf("Failed to close registry key"), "err", err)
_ = level.Warn(logger).Log("msg", "Failed to close registry key", "err", err)
}
}()
@@ -926,23 +926,23 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectWebService(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
if err := c.collectWebService(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "err", err)
return err
}
if desc, err := c.collectAPP_POOL_WAS(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
if err := c.collectAPP_POOL_WAS(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "err", err)
return err
}
if desc, err := c.collectW3SVC_W3WP(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
if err := c.collectW3SVC_W3WP(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "err", err)
return err
}
if desc, err := c.collectWebServiceCache(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
if err := c.collectWebServiceCache(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "err", err)
return err
}
@@ -1039,10 +1039,10 @@ func dedupIISNames[V hasGetIISName](services []V) map[string]V {
return webServiceDeDuplicated
}
func (c *collector) collectWebService(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectWebService(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var webService []perflibWebService
if err := perflib.UnmarshalObject(ctx.PerfObjects["Web Service"], &webService, c.logger); err != nil {
return nil, err
return err
}
ch <- prometheus.MustNewConstMetric(
@@ -1298,7 +1298,7 @@ func (c *collector) collectWebService(ctx *types.ScrapeContext, ch chan<- promet
)
}
return nil, nil
return nil
}
type perflibAPP_POOL_WAS struct {
@@ -1331,10 +1331,10 @@ var applicationStates = map[uint32]string{
7: "Delete Pending",
}
func (c *collector) collectAPP_POOL_WAS(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectAPP_POOL_WAS(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var APP_POOL_WAS []perflibAPP_POOL_WAS
if err := perflib.UnmarshalObject(ctx.PerfObjects["APP_POOL_WAS"], &APP_POOL_WAS, c.logger); err != nil {
return nil, err
return err
}
appPoolDeDuplicated := dedupIISNames(APP_POOL_WAS)
@@ -1434,7 +1434,7 @@ func (c *collector) collectAPP_POOL_WAS(ctx *types.ScrapeContext, ch chan<- prom
)
}
return nil, nil
return nil
}
var workerProcessNameExtractor = regexp.MustCompile(`^(\d+)_(.+)$`)
@@ -1508,10 +1508,10 @@ type perflibW3SVC_W3WP_IIS8 struct {
WebSocketConnectionsRejected float64 `perflib:"WebSocket Connections Rejected / Sec"`
}
func (c *collector) collectW3SVC_W3WP(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectW3SVC_W3WP(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var W3SVC_W3WP []perflibW3SVC_W3WP
if err := perflib.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP, c.logger); err != nil {
return nil, err
return err
}
w3svcW3WPDeduplicated := dedupIISNames(W3SVC_W3WP)
@@ -1770,7 +1770,7 @@ func (c *collector) collectW3SVC_W3WP(ctx *types.ScrapeContext, ch chan<- promet
if c.iis_version.major >= 8 {
var W3SVC_W3WP_IIS8 []perflibW3SVC_W3WP_IIS8
if err := perflib.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP_IIS8, c.logger); err != nil {
return nil, err
return err
}
w3svcW3WPIIS8Deduplicated := dedupIISNames(W3SVC_W3WP_IIS8)
@@ -1856,7 +1856,7 @@ func (c *collector) collectW3SVC_W3WP(ctx *types.ScrapeContext, ch chan<- promet
}
}
return nil, nil
return nil
}
type perflibWebServiceCache struct {
@@ -1906,10 +1906,10 @@ type perflibWebServiceCache struct {
ServiceCache_OutputCacheQueriesTotal float64
}
func (c *collector) collectWebServiceCache(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectWebServiceCache(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var WebServiceCache []perflibWebServiceCache
if err := perflib.UnmarshalObject(ctx.PerfObjects["Web Service Cache"], &WebServiceCache, c.logger); err != nil {
return nil, err
return err
}
for _, app := range WebServiceCache {
@@ -2097,5 +2097,5 @@ func (c *collector) collectWebServiceCache(ctx *types.ScrapeContext, ch chan<- p
)
}
return nil, nil
return nil
}

View File

@@ -0,0 +1,95 @@
//go:build windows
package license
import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus-community/windows_exporter/pkg/headers/slc"
"github.com/prometheus-community/windows_exporter/pkg/types"
)
const Name = "license"
var labelMap = map[slc.SL_GENUINE_STATE]string{
slc.SL_GEN_STATE_IS_GENUINE: "genuine",
slc.SL_GEN_STATE_INVALID_LICENSE: "invalid_license",
slc.SL_GEN_STATE_TAMPERED: "tampered",
slc.SL_GEN_STATE_OFFLINE: "offline",
slc.SL_GEN_STATE_LAST: "last",
}
type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for Windows license status metrics
type collector struct {
logger log.Logger
LicenseStatus *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
}
func (c *collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.LicenseStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "status"),
"Status of windows license",
[]string{"state"},
nil,
)
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting license metrics", "err", err)
return err
}
return nil
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
status, err := slc.SLIsWindowsGenuineLocal()
if err != nil {
return err
}
for k, v := range labelMap {
val := 0.0
if status == k {
val = 1.0
}
ch <- prometheus.MustNewConstMetric(c.LicenseStatus, prometheus.GaugeValue, val, v)
}
return nil
}

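Assuming the exporter's usual windows_ metric namespace, the new collector should expose one series per genuine-state label, with only the reported state set to 1, roughly:

windows_license_status{state="genuine"} 1
windows_license_status{state="invalid_license"} 0
windows_license_status{state="offline"} 0
windows_license_status{state="tampered"} 0
windows_license_status{state="last"} 0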
View File

@@ -0,0 +1,12 @@
package license_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, license.Name, license.NewWithFlags)
}

View File

@@ -3,16 +3,18 @@
package logical_disk
import (
"errors"
"encoding/binary"
"fmt"
"golang.org/x/sys/windows"
"regexp"
"strconv"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
@@ -21,15 +23,8 @@ const (
FlagLogicalDiskVolumeExclude = "collector.logical_disk.volume-exclude"
FlagLogicalDiskVolumeInclude = "collector.logical_disk.volume-include"
win32DiskQuery = "SELECT VolumeName,DeviceID FROM WIN32_LogicalDisk"
)
type Win32_LogicalDisk struct {
VolumeName string
DeviceID string
}
type Config struct {
VolumeInclude string `yaml:"volume_include"`
VolumeExclude string `yaml:"volume_exclude"`
@@ -47,6 +42,8 @@ type collector struct {
volumeInclude *string
volumeExclude *string
Information *prometheus.Desc
ReadOnly *prometheus.Desc
RequestsQueued *prometheus.Desc
AvgReadQueue *prometheus.Desc
AvgWriteQueue *prometheus.Desc
@@ -68,6 +65,14 @@ type collector struct {
volumeExcludePattern *regexp.Regexp
}
type volumeInfo struct {
filesystem string
serialNumber string
label string
volumeType string
readonly float64
}
func New(logger log.Logger, config *Config) types.Collector {
if config == nil {
config = &ConfigDefaults
@@ -109,115 +114,127 @@ func (c *collector) GetPerfCounter() ([]string, error) {
}
func (c *collector) Build() error {
c.Information = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"A metric with a constant '1' value labeled with logical disk information",
[]string{"disk", "type", "volume", "volume_name", "filesystem", "serial_number"},
nil,
)
c.ReadOnly = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "readonly"),
"Whether the logical disk is read-only",
[]string{"volume"},
nil,
)
c.RequestsQueued = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
"The number of requests queued to the disk (LogicalDisk.CurrentDiskQueueLength)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.AvgReadQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "avg_read_requests_queued"),
"Average number of read requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskReadQueueLength)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.AvgWriteQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "avg_write_requests_queued"),
"Average number of write requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskWriteQueueLength)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.ReadBytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_bytes_total"),
"The number of bytes transferred from the disk during read operations (LogicalDisk.DiskReadBytesPerSec)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.ReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "reads_total"),
"The number of read operations on the disk (LogicalDisk.DiskReadsPerSec)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.WriteBytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_bytes_total"),
"The number of bytes transferred to the disk during write operations (LogicalDisk.DiskWriteBytesPerSec)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.WritesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "writes_total"),
"The number of write operations on the disk (LogicalDisk.DiskWritesPerSec)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.ReadTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_seconds_total"),
"Seconds that the disk was busy servicing read requests (LogicalDisk.PercentDiskReadTime)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.WriteTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_seconds_total"),
"Seconds that the disk was busy servicing write requests (LogicalDisk.PercentDiskWriteTime)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.FreeSpace = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "free_bytes"),
"Free space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.TotalSpace = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "size_bytes"),
"Total space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace_Base)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.IdleTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "idle_seconds_total"),
"Seconds that the disk was idle (LogicalDisk.PercentIdleTime)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.SplitIOs = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "split_ios_total"),
"The number of I/Os to the disk were split into multiple I/Os (LogicalDisk.SplitIOPerSec)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.ReadLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_latency_seconds_total"),
"Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.WriteLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_latency_seconds_total"),
"Shows the average time, in seconds, of a write operation to the disk (LogicalDisk.AvgDiskSecPerWrite)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
c.ReadWriteLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_write_latency_seconds_total"),
"Shows the time, in seconds, of the average disk transfer (LogicalDisk.AvgDiskSecPerTransfer)",
[]string{"volume", "volume_name"},
[]string{"volume"},
nil,
)
@@ -238,8 +255,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting logical_disk metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting logical_disk metrics", "err", err)
return err
}
return nil
@@ -250,7 +267,6 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
type logicalDisk struct {
Name string
VolumeName string
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
AvgDiskReadQueueLength float64 `perflib:"Avg. Disk Read Queue Length"`
AvgDiskWriteQueueLength float64 `perflib:"Avg. Disk Write Queue Length"`
@@ -269,19 +285,16 @@ type logicalDisk struct {
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst_Win32_LogicalDisk []Win32_LogicalDisk
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var (
err error
diskID string
info volumeInfo
dst []logicalDisk
)
if err := wmi.Query(win32DiskQuery, &dst_Win32_LogicalDisk); err != nil {
return nil, err
}
if len(dst_Win32_LogicalDisk) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
var dst []logicalDisk
if err := perflib.UnmarshalObject(ctx.PerfObjects["LogicalDisk"], &dst, c.logger); err != nil {
return nil, err
if err = perflib.UnmarshalObject(ctx.PerfObjects["LogicalDisk"], &dst, c.logger); err != nil {
return err
}
for _, volume := range dst {
@@ -290,145 +303,233 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
!c.volumeIncludePattern.MatchString(volume.Name) {
continue
}
for _, logicalDisk := range dst_Win32_LogicalDisk {
if logicalDisk.VolumeName == "" {
logicalDisk.VolumeName = "Local Disk"
}
if logicalDisk.DeviceID == volume.Name {
ch <- prometheus.MustNewConstMetric(
c.RequestsQueued,
prometheus.GaugeValue,
volume.CurrentDiskQueueLength,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.AvgReadQueue,
prometheus.GaugeValue,
volume.AvgDiskReadQueueLength*perflib.TicksToSecondScaleFactor,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.AvgWriteQueue,
prometheus.GaugeValue,
volume.AvgDiskWriteQueueLength*perflib.TicksToSecondScaleFactor,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.ReadBytesTotal,
prometheus.CounterValue,
volume.DiskReadBytesPerSec,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.ReadsTotal,
prometheus.CounterValue,
volume.DiskReadsPerSec,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.WriteBytesTotal,
prometheus.CounterValue,
volume.DiskWriteBytesPerSec,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.WritesTotal,
prometheus.CounterValue,
volume.DiskWritesPerSec,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.ReadTime,
prometheus.CounterValue,
volume.PercentDiskReadTime,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.WriteTime,
prometheus.CounterValue,
volume.PercentDiskWriteTime,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.FreeSpace,
prometheus.GaugeValue,
volume.PercentFreeSpace_Base*1024*1024,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.TotalSpace,
prometheus.GaugeValue,
volume.PercentFreeSpace*1024*1024,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.IdleTime,
prometheus.CounterValue,
volume.PercentIdleTime,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.SplitIOs,
prometheus.CounterValue,
volume.SplitIOPerSec,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.ReadLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerRead*perflib.TicksToSecondScaleFactor,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.WriteLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerWrite*perflib.TicksToSecondScaleFactor,
volume.Name,
logicalDisk.VolumeName,
)
ch <- prometheus.MustNewConstMetric(
c.ReadWriteLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerTransfer*perflib.TicksToSecondScaleFactor,
volume.Name,
logicalDisk.VolumeName,
)
break
}
diskID, err = getDiskIDByVolume(volume.Name)
if err != nil {
_ = level.Warn(c.logger).Log("msg", "failed to get disk ID for "+volume.Name, "err", err)
}
info, err = getVolumeInfo(volume.Name)
if err != nil {
_ = level.Warn(c.logger).Log("msg", "failed to get volume information for "+volume.Name, "err", err)
}
ch <- prometheus.MustNewConstMetric(
c.Information,
prometheus.GaugeValue,
1,
diskID,
info.volumeType,
volume.Name,
info.label,
info.filesystem,
info.serialNumber,
)
ch <- prometheus.MustNewConstMetric(
c.RequestsQueued,
prometheus.GaugeValue,
volume.CurrentDiskQueueLength,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.AvgReadQueue,
prometheus.GaugeValue,
volume.AvgDiskReadQueueLength*perflib.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.AvgWriteQueue,
prometheus.GaugeValue,
volume.AvgDiskWriteQueueLength*perflib.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadBytesTotal,
prometheus.CounterValue,
volume.DiskReadBytesPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadsTotal,
prometheus.CounterValue,
volume.DiskReadsPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WriteBytesTotal,
prometheus.CounterValue,
volume.DiskWriteBytesPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WritesTotal,
prometheus.CounterValue,
volume.DiskWritesPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadTime,
prometheus.CounterValue,
volume.PercentDiskReadTime,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WriteTime,
prometheus.CounterValue,
volume.PercentDiskWriteTime,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FreeSpace,
prometheus.GaugeValue,
volume.PercentFreeSpace_Base*1024*1024,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalSpace,
prometheus.GaugeValue,
volume.PercentFreeSpace*1024*1024,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.IdleTime,
prometheus.CounterValue,
volume.PercentIdleTime,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.SplitIOs,
prometheus.CounterValue,
volume.SplitIOPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerRead*perflib.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WriteLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerWrite*perflib.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadWriteLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerTransfer*perflib.TicksToSecondScaleFactor,
volume.Name,
)
}
return nil, nil
return nil
}
func getDriveType(driveType uint32) string {
switch driveType {
case windows.DRIVE_UNKNOWN:
return "unknown"
case windows.DRIVE_NO_ROOT_DIR:
return "norootdir"
case windows.DRIVE_REMOVABLE:
return "removable"
case windows.DRIVE_FIXED:
return "fixed"
case windows.DRIVE_REMOTE:
return "remote"
case windows.DRIVE_CDROM:
return "cdrom"
case windows.DRIVE_RAMDISK:
return "ramdisk"
default:
return "unknown"
}
}
// getDiskIDByVolume returns the disk ID for a given volume.
func getDiskIDByVolume(rootDrive string) (string, error) {
// Open a volume handle to the Disk Root.
var err error
var f windows.Handle
// mode has to include FILE_SHARE permission to allow concurrent access to the disk.
// use 0 as access mode to avoid admin permission.
mode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE | windows.FILE_SHARE_DELETE)
f, err = windows.CreateFile(
windows.StringToUTF16Ptr(`\\.\`+rootDrive),
0, mode, nil, windows.OPEN_EXISTING, uint32(windows.FILE_ATTRIBUTE_READONLY), 0)
if err != nil {
return "", err
}
defer windows.Close(f)
controlCode := uint32(5636096) // IOCTL_VOLUME_GET_VOLUME_DISK_EXTENTS
volumeDiskExtents := make([]byte, 16*1024)
var bytesReturned uint32
err = windows.DeviceIoControl(f, controlCode, nil, 0, &volumeDiskExtents[0], uint32(len(volumeDiskExtents)), &bytesReturned, nil)
if err != nil {
return "", err
}
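// A VOLUME_DISK_EXTENTS buffer begins with a 4-byte NumberOfDiskExtents; the
// DISK_EXTENT array starts at offset 8, so bytes 8-11 hold the first extent's DiskNumber.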
if uint(binary.LittleEndian.Uint32(volumeDiskExtents)) != 1 {
return "", fmt.Errorf("could not identify physical drive for %s", rootDrive)
}
diskId := strconv.FormatUint(uint64(binary.LittleEndian.Uint32(volumeDiskExtents[8:])), 10)
return diskId, nil
}
func getVolumeInfo(rootDrive string) (volumeInfo, error) {
if !strings.HasSuffix(rootDrive, ":") {
return volumeInfo{}, nil
}
volPath := windows.StringToUTF16Ptr(rootDrive + `\`)
volBufLabel := make([]uint16, windows.MAX_PATH+1)
volSerialNum := uint32(0)
fsFlags := uint32(0)
volBufType := make([]uint16, windows.MAX_PATH+1)
driveType := windows.GetDriveType(volPath)
err := windows.GetVolumeInformation(volPath, &volBufLabel[0], uint32(len(volBufLabel)),
&volSerialNum, nil, &fsFlags, &volBufType[0], uint32(len(volBufType)))
if err != nil {
if driveType != windows.DRIVE_CDROM && driveType != windows.DRIVE_REMOVABLE {
return volumeInfo{}, err
}
return volumeInfo{}, nil
}
return volumeInfo{
volumeType: getDriveType(driveType),
label: windows.UTF16PtrToString(&volBufLabel[0]),
filesystem: windows.UTF16PtrToString(&volBufType[0]),
serialNumber: fmt.Sprintf("%X", volSerialNum),
readonly: float64(fsFlags & windows.FILE_READ_ONLY_VOLUME),
}, nil
}

View File

@@ -62,8 +62,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting user metrics", "err", err)
return err
}
return nil
@@ -75,14 +75,14 @@ type Win32_LogonSession struct {
LogonType uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_LogonSession
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
// Init counters
@@ -221,5 +221,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
float64(cachedunlock),
"cached_unlock",
)
return nil, nil
return nil
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/fsrmquota"
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv"
"github.com/prometheus-community/windows_exporter/pkg/collector/iis"
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
"github.com/prometheus-community/windows_exporter/pkg/collector/logon"
"github.com/prometheus-community/windows_exporter/pkg/collector/memory"
@@ -39,11 +40,13 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/nps"
"github.com/prometheus-community/windows_exporter/pkg/collector/os"
"github.com/prometheus-community/windows_exporter/pkg/collector/physical_disk"
"github.com/prometheus-community/windows_exporter/pkg/collector/printer"
"github.com/prometheus-community/windows_exporter/pkg/collector/process"
"github.com/prometheus-community/windows_exporter/pkg/collector/remote_fx"
"github.com/prometheus-community/windows_exporter/pkg/collector/scheduled_task"
"github.com/prometheus-community/windows_exporter/pkg/collector/service"
"github.com/prometheus-community/windows_exporter/pkg/collector/smb"
"github.com/prometheus-community/windows_exporter/pkg/collector/smbclient"
"github.com/prometheus-community/windows_exporter/pkg/collector/smtp"
"github.com/prometheus-community/windows_exporter/pkg/collector/system"
"github.com/prometheus-community/windows_exporter/pkg/collector/tcp"
@@ -76,6 +79,7 @@ var Map = map[string]types.CollectorBuilderWithFlags{
fsrmquota.Name: fsrmquota.NewWithFlags,
hyperv.Name: hyperv.NewWithFlags,
iis.Name: iis.NewWithFlags,
license.Name: license.NewWithFlags,
logical_disk.Name: logical_disk.NewWithFlags,
logon.Name: logon.NewWithFlags,
memory.Name: memory.NewWithFlags,
@@ -98,11 +102,13 @@ var Map = map[string]types.CollectorBuilderWithFlags{
nps.Name: nps.NewWithFlags,
os.Name: os.NewWithFlags,
physical_disk.Name: physical_disk.NewWithFlags,
printer.Name: printer.NewWithFlags,
process.Name: process.NewWithFlags,
remote_fx.Name: remote_fx.NewWithFlags,
scheduled_task.Name: scheduled_task.NewWithFlags,
service.Name: service.NewWithFlags,
smb.Name: smb.NewWithFlags,
smbclient.Name: smbclient.NewWithFlags,
smtp.Name: smtp.NewWithFlags,
system.Name: system.NewWithFlags,
teradici_pcoip.Name: teradici_pcoip.NewWithFlags,

View File

@@ -291,8 +291,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting memory metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting memory metrics", "err", err)
return err
}
return nil
@@ -335,10 +335,10 @@ type memory struct {
WriteCopiesPersec float64 `perflib:"Write Copies/sec"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []memory
if err := perflib.UnmarshalObject(ctx.PerfObjects["Memory"], &dst, c.logger); err != nil {
return nil, err
return err
}
ch <- prometheus.MustNewConstMetric(
@@ -533,5 +533,5 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
dst[0].WriteCopiesPersec,
)
return nil, nil
return nil
}

View File

@@ -106,8 +106,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting msmq metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting msmq metrics", "err", err)
return err
}
return nil
@@ -122,11 +122,11 @@ type Win32_PerfRawData_MSMQ_MSMQQueue struct {
MessagesinQueue uint64
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_MSMQ_MSMQQueue
q := wmi.QueryAllWhere(&dst, *c.queryWhereClause, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, msmq := range dst {
@@ -158,5 +158,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
strings.ToLower(msmq.Name),
)
}
return nil, nil
return nil
}

View File

@@ -1932,7 +1932,7 @@ func (c *collector) Build() error {
return nil
}
type mssqlCollectorFunc func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error)
type mssqlCollectorFunc func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error
func (c *collector) execute(ctx *types.ScrapeContext, name string, fn mssqlCollectorFunc, ch chan<- prometheus.Metric, sqlInstance string, wg *sync.WaitGroup) {
// Reset failure counter on each scrape
@@ -1940,7 +1940,7 @@ func (c *collector) execute(ctx *types.ScrapeContext, name string, fn mssqlColle
defer wg.Done()
begin := time.Now()
_, err := fn(ctx, ch, sqlInstance)
err := fn(ctx, ch, sqlInstance)
duration := time.Since(begin)
var success float64
@@ -2038,12 +2038,12 @@ type mssqlAccessMethods struct {
WorktablesFromCacheRatio_Base float64 `perflib:"Worktables From Cache Base_Base"`
}
func (c *collector) collectAccessMethods(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectAccessMethods(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlAccessMethods
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_accessmethods collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "accessmethods")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -2355,7 +2355,7 @@ func (c *collector) collectAccessMethods(ctx *types.ScrapeContext, ch chan<- pro
sqlInstance,
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica docs:
@@ -2373,12 +2373,12 @@ type mssqlAvailabilityReplica struct {
SendstoTransportPersec float64 `perflib:"Sends to Transport/sec"`
}
func (c *collector) collectAvailabilityReplica(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectAvailabilityReplica(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlAvailabilityReplica
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_availreplica collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "availreplica")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -2450,7 +2450,7 @@ func (c *collector) collectAvailabilityReplica(ctx *types.ScrapeContext, ch chan
sqlInstance, replicaName,
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager docs:
@@ -2481,12 +2481,12 @@ type mssqlBufferManager struct {
Targetpages float64 `perflib:"Target pages"`
}
func (c *collector) collectBufferManager(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectBufferManager(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlBufferManager
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_bufman collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "bufman")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -2652,7 +2652,7 @@ func (c *collector) collectBufferManager(ctx *types.ScrapeContext, ch chan<- pro
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica docs:
@@ -2685,12 +2685,12 @@ type mssqlDatabaseReplica struct {
TransactionDelay float64 `perflib:"Transaction Delay"`
}
func (c *collector) collectDatabaseReplica(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectDatabaseReplica(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlDatabaseReplica
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_dbreplica collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "dbreplica")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -2867,7 +2867,7 @@ func (c *collector) collectDatabaseReplica(ctx *types.ScrapeContext, ch chan<- p
sqlInstance, replicaName,
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases docs:
@@ -2924,12 +2924,12 @@ type mssqlDatabases struct {
XTPMemoryUsedKB float64 `perflib:"XTP Memory Used (KB)"`
}
func (c *collector) collectDatabases(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectDatabases(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlDatabases
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_databases collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "databases")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -3274,7 +3274,7 @@ func (c *collector) collectDatabases(ctx *types.ScrapeContext, ch chan<- prometh
sqlInstance, dbName,
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics docs:
@@ -3306,12 +3306,12 @@ type mssqlGeneralStatistics struct {
UserConnections float64 `perflib:"User Connections"`
}
func (c *collector) collectGeneralStatistics(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectGeneralStatistics(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlGeneralStatistics
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_genstats collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "genstats")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -3484,7 +3484,7 @@ func (c *collector) collectGeneralStatistics(ctx *types.ScrapeContext, ch chan<-
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_MSSQLSERVER_SQLServerLocks docs:
@@ -3501,12 +3501,12 @@ type mssqlLocks struct {
NumberofDeadlocksPersec float64 `perflib:"Number of Deadlocks/sec"`
}
func (c *collector) collectLocks(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectLocks(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlLocks
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_locks collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "locks")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -3571,7 +3571,7 @@ func (c *collector) collectLocks(ctx *types.ScrapeContext, ch chan<- prometheus.
sqlInstance, lockResourceName,
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager docs:
@@ -3599,12 +3599,12 @@ type mssqlMemoryManager struct {
TotalServerMemoryKB float64 `perflib:"Total Server Memory (KB)"`
}
func (c *collector) collectMemoryManager(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectMemoryManager(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlMemoryManager
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_memmgr collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "memmgr")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -3749,7 +3749,7 @@ func (c *collector) collectMemoryManager(ctx *types.ScrapeContext, ch chan<- pro
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics docs:
@@ -3768,12 +3768,12 @@ type mssqlSQLStatistics struct {
UnsafeAutoParamsPersec float64 `perflib:"Unsafe Auto-Params/sec"`
}
func (c *collector) collectSQLStats(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectSQLStats(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlSQLStatistics
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlstats collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlstats")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -3855,7 +3855,7 @@ func (c *collector) collectSQLStats(ctx *types.ScrapeContext, ch chan<- promethe
)
}
return nil, nil
return nil
}
// Win32_PerfRawData_MSSQLSERVER_SQLServerWaitStatistics docs:
@@ -3876,12 +3876,12 @@ type mssqlWaitStatistics struct {
WaitStatsTransactionOwnershipWaits float64 `perflib:"Transaction ownership waits"`
}
func (c *collector) collectWaitStats(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectWaitStats(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlWaitStatistics
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_waitstats collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "waitstats")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -3972,7 +3972,7 @@ func (c *collector) collectWaitStats(ctx *types.ScrapeContext, ch chan<- prometh
)
}
return nil, nil
return nil
}
type mssqlSQLErrors struct {
@@ -3982,12 +3982,12 @@ type mssqlSQLErrors struct {
// Win32_PerfRawData_MSSQLSERVER_SQLServerErrors docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object
func (c *collector) collectSQLErrors(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectSQLErrors(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlSQLErrors
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlerrors")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -4004,7 +4004,7 @@ func (c *collector) collectSQLErrors(ctx *types.ScrapeContext, ch chan<- prometh
)
}
return nil, nil
return nil
}
type mssqlTransactions struct {
@@ -4025,12 +4025,12 @@ type mssqlTransactions struct {
// Win32_PerfRawData_MSSQLSERVER_Transactions docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
func (c *collector) collectTransactions(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
func (c *collector) collectTransactions(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlTransactions
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_transactions collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "transactions")], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, v := range dst {
@@ -4126,5 +4126,5 @@ func (c *collector) collectTransactions(ctx *types.ScrapeContext, ch chan<- prom
)
}
return nil, nil
return nil
}

View File

@@ -196,8 +196,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting net metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting net metrics", "err", err)
return err
}
return nil
@@ -228,11 +228,11 @@ type networkInterface struct {
CurrentBandwidth float64 `perflib:"Current Bandwidth"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []networkInterface
if err := perflib.UnmarshalObject(ctx.PerfObjects["Network Interface"], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, nic := range dst {
@@ -326,5 +326,5 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
name,
)
}
return nil, nil
return nil
}

View File

@@ -81,8 +81,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrexceptions metrics", "err", err)
return err
}
return nil
@@ -98,11 +98,11 @@ type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {
ThrowToCatchDepthPersec uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, process := range dst {
@@ -140,5 +140,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

View File

@@ -74,8 +74,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrinterop metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrinterop metrics", "err", err)
return err
}
return nil
@@ -91,11 +91,11 @@ type Win32_PerfRawData_NETFramework_NETCLRInterop struct {
NumberofTLBimportsPersec uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, process := range dst {
@@ -126,5 +126,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

View File

@@ -80,8 +80,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrjit metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrjit metrics", "err", err)
return err
}
return nil
@@ -99,11 +99,11 @@ type Win32_PerfRawData_NETFramework_NETCLRJit struct {
TotalNumberofILBytesJitted uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRJit
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, process := range dst {
@@ -141,5 +141,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

View File

@@ -115,8 +115,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrloading metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrloading metrics", "err", err)
return err
}
return nil
@@ -143,11 +143,11 @@ type Win32_PerfRawData_NETFramework_NETCLRLoading struct {
TotalNumberofLoadFailures uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRLoading
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, process := range dst {
@@ -220,5 +220,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

View File

@@ -101,8 +101,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics", "err", err)
return err
}
return nil
@@ -123,11 +123,11 @@ type Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads struct {
TotalNumberofContentions uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, process := range dst {
@@ -186,5 +186,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

View File

@@ -139,8 +139,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrmemory metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrmemory metrics", "err", err)
return err
}
return nil
@@ -180,11 +180,11 @@ type Win32_PerfRawData_NETFramework_NETCLRMemory struct {
PromotedMemoryfromGen1 uint64
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRMemory
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, process := range dst {
@@ -329,5 +329,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

View File

@@ -94,8 +94,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrremoting metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrremoting metrics", "err", err)
return err
}
return nil
@@ -113,11 +113,11 @@ type Win32_PerfRawData_NETFramework_NETCLRRemoting struct {
TotalRemoteCalls uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, process := range dst {
@@ -169,5 +169,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

View File

@@ -80,8 +80,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrsecurity metrics", "err", err)
return err
}
return nil
@@ -98,11 +98,11 @@ type Win32_PerfRawData_NETFramework_NETCLRSecurity struct {
TotalRuntimeChecks uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
for _, process := range dst {
@@ -140,5 +140,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

View File

@@ -229,12 +229,12 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.CollectAccept(ch); err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("failed collecting NPS accept data: %s %v", desc, err))
if err := c.CollectAccept(ch); err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("failed collecting NPS accept data: %s", err))
return err
}
if desc, err := c.CollectAccounting(ch); err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("failed collecting NPS accounting data: %s %v", desc, err))
if err := c.CollectAccounting(ch); err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("failed collecting NPS accounting data: %s", err))
return err
}
return nil
@@ -279,11 +279,11 @@ type Win32_PerfRawData_IAS_NPSAccountingServer struct {
// CollectAccept sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) CollectAccept(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) CollectAccept(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_IAS_NPSAuthenticationServer
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
ch <- prometheus.MustNewConstMetric(
@@ -364,14 +364,14 @@ func (c *collector) CollectAccept(ch chan<- prometheus.Metric) (*prometheus.Desc
float64(dst[0].AccessUnknownType),
)
return nil, nil
return nil
}
func (c *collector) CollectAccounting(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) CollectAccounting(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_IAS_NPSAccountingServer
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
ch <- prometheus.MustNewConstMetric(
@@ -446,5 +446,5 @@ func (c *collector) CollectAccounting(ch chan<- prometheus.Metric) (*prometheus.
float64(dst[0].AccountingUnknownType),
)
return nil, nil
return nil
}

View File

@@ -6,17 +6,21 @@ import (
"errors"
"fmt"
"os"
"strconv"
"strings"
"syscall"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/headers/kernel32"
"github.com/prometheus-community/windows_exporter/pkg/headers/netapi32"
"github.com/prometheus-community/windows_exporter/pkg/headers/psapi"
"github.com/prometheus-community/windows_exporter/pkg/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows/registry"
)
@@ -159,8 +163,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting os metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting os metrics", "err", err)
return err
}
return nil
@@ -184,52 +188,35 @@ type Win32_OperatingSystem struct {
Version string
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
nwgi, err := netapi32.GetWorkstationInfo()
if err != nil {
return nil, err
return err
}
gmse, err := sysinfoapi.GlobalMemoryStatusEx()
if err != nil {
return nil, err
return err
}
currentTime := time.Now()
timezoneName, _ := currentTime.Zone()
timeZoneInfo, err := kernel32.GetDynamicTimeZoneInformation()
if err != nil {
return err
}
// TimeZoneKeyName contains the English name of the timezone.
timezoneName := syscall.UTF16ToString(timeZoneInfo.TimeZoneKeyName[:])
// Get total allocation of paging files across all disks.
memManKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Control\Session Manager\Memory Management`, registry.QUERY_VALUE)
defer memManKey.Close()
if err != nil {
return nil, err
return err
}
pagingFiles, _, pagingErr := memManKey.GetStringsValue("ExistingPageFiles")
// Get build number and product name from registry
ntKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
defer ntKey.Close()
if err != nil {
return nil, err
}
pn, _, err := ntKey.GetStringValue("ProductName")
if err != nil {
return nil, err
}
bn, _, err := ntKey.GetStringValue("CurrentBuildNumber")
if err != nil {
return nil, err
}
revision, _, err := ntKey.GetIntegerValue("UBR")
if errors.Is(err, registry.ErrNotExist) {
revision = 0
} else if err != nil {
return nil, err
}
var fsipf float64
for _, pagingFile := range pagingFiles {
@@ -243,14 +230,39 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
}
}
// Get build number and product name from registry
ntKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
defer ntKey.Close()
if err != nil {
return err
}
pn, _, err := ntKey.GetStringValue("ProductName")
if err != nil {
return err
}
bn, _, err := ntKey.GetStringValue("CurrentBuildNumber")
if err != nil {
return err
}
revision, _, err := ntKey.GetIntegerValue("UBR")
if errors.Is(err, registry.ErrNotExist) {
revision = 0
} else if err != nil {
return err
}
gpi, err := psapi.GetPerformanceInfo()
if err != nil {
return nil, err
return err
}
pfc := make([]pagingFileCounter, 0)
if err := perflib.UnmarshalObject(ctx.PerfObjects["Paging File"], &pfc, c.logger); err != nil {
return nil, err
return err
}
// Get current page file usage.
@@ -269,12 +281,12 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
c.OSInformation,
prometheus.GaugeValue,
1.0,
fmt.Sprintf("Microsoft %s", pn), // Caption
"Microsoft "+pn, // Caption
fmt.Sprintf("%d.%d.%s", nwgi.VersionMajor, nwgi.VersionMinor, bn), // Version
fmt.Sprintf("%d", nwgi.VersionMajor), // Major Version
fmt.Sprintf("%d", nwgi.VersionMinor), // Minor Version
bn, // Build number
fmt.Sprintf("%d", revision), // Revision
strconv.FormatUint(uint64(nwgi.VersionMajor), 10), // Major Version
strconv.FormatUint(uint64(nwgi.VersionMinor), 10), // Minor Version
bn, // Build number
strconv.FormatUint(revision, 10), // Revision
)
ch <- prometheus.MustNewConstMetric(
@@ -309,7 +321,7 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
fsipf,
)
} else {
_ = level.Debug(c.logger).Log("Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
_ = level.Debug(c.logger).Log("msg", "Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
}
ch <- prometheus.MustNewConstMetric(
c.VirtualMemoryFreeBytes,
@@ -356,5 +368,5 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
float64(gmse.TotalPhys),
)
return nil, nil
return nil
}

View File

@@ -206,8 +206,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting physical_disk metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting physical_disk metrics", "err", err)
return err
}
return nil
@@ -232,10 +232,10 @@ type PhysicalDisk struct {
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PhysicalDisk
if err := perflib.UnmarshalObject(ctx.PerfObjects["PhysicalDisk"], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, disk := range dst {
@@ -334,5 +334,5 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
)
}
return nil, nil
return nil
}

View File

@@ -0,0 +1,227 @@
//go:build windows
package printer
import (
"fmt"
"regexp"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
)
const (
Name = "printer"
FlagPrinterInclude = "collector.printer.include"
FlagPrinterExclude = "collector.printer.exclude"
)
// printerStatusMap source: https://learn.microsoft.com/en-us/windows/win32/cimwin32prov/win32-printer#:~:text=Power%20Save-,PrinterStatus,Offline%20(7),-PrintJobDataType
var printerStatusMap = map[uint16]string{
1: "Other",
2: "Unknown",
3: "Idle",
4: "Printing",
5: "Warmup",
6: "Stopped Printing",
7: "Offline",
}
type Config struct {
printerInclude string `yaml:"printer_include"`
printerExclude string `yaml:"printer_exclude"`
}
var ConfigDefaults = Config{
printerInclude: ".+",
printerExclude: "",
}
type collector struct {
logger log.Logger
printerInclude *string
printerExclude *string
printerStatus *prometheus.Desc
printerJobStatus *prometheus.Desc
printerJobCount *prometheus.Desc
printerIncludePattern *regexp.Regexp
printerExcludePattern *regexp.Regexp
}
func New(logger log.Logger, config *Config) types.Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
printerInclude: &config.printerInclude,
printerExclude: &config.printerExclude,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
c := &collector{
printerInclude: app.Flag(
FlagPrinterInclude,
"Regular expression to match printers to collect metrics for",
).Default(ConfigDefaults.printerInclude).String(),
printerExclude: app.Flag(
FlagPrinterExclude,
"Regular expression to match printers to exclude",
).Default(ConfigDefaults.printerExclude).String(),
}
return c
}
func (c *collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) Build() error {
c.printerJobStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "job_status"),
"A counter of printer jobs by status",
[]string{"printer", "status"},
nil,
)
c.printerStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "status"),
"Printer status",
[]string{"printer", "status"},
nil,
)
c.printerJobCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "job_count"),
"Number of jobs processed by the printer since the last reset",
[]string{"printer"},
nil,
)
var err error
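// Anchor the user-supplied include/exclude expressions so they must match the entire
// printer name rather than a substring.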
c.printerIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.printerInclude))
if err != nil {
return err
}
c.printerExcludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.printerExclude))
return err
}
func (c *collector) GetName() string { return Name }
func (c *collector) GetPerfCounter() ([]string, error) { return []string{"Printer"}, nil }
type win32_Printer struct {
Name string
Default bool
PrinterStatus uint16
JobCountSinceLastReset uint32
}
type win32_PrintJob struct {
Name string
Status string
}
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collectPrinterStatus(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed to collect printer status metrics", "err", err)
return err
}
if err := c.collectPrinterJobStatus(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed to collect printer job status metrics", "err", err)
return err
}
return nil
}
func (c *collector) collectPrinterStatus(ch chan<- prometheus.Metric) error {
var printers []win32_Printer
q := wmi.QueryAll(&printers, c.logger)
if err := wmi.Query(q, &printers); err != nil {
return err
}
for _, printer := range printers {
if c.printerExcludePattern.MatchString(printer.Name) ||
!c.printerIncludePattern.MatchString(printer.Name) {
continue
}
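// ENUM / state-set pattern: emit one gauge per known printer status, set to 1 for the
// printer's current status and 0 otherwise, so the status is encoded in the "status"
// label instead of an opaque numeric value.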
for printerStatus, printerStatusName := range printerStatusMap {
isCurrentStatus := 0.0
if printerStatus == printer.PrinterStatus {
isCurrentStatus = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.printerStatus,
prometheus.GaugeValue,
isCurrentStatus,
printer.Name,
printerStatusName,
)
}
ch <- prometheus.MustNewConstMetric(
c.printerJobCount,
prometheus.CounterValue,
float64(printer.JobCountSinceLastReset),
printer.Name,
)
}
return nil
}
func (c *collector) collectPrinterJobStatus(ch chan<- prometheus.Metric) error {
var printJobs []win32_PrintJob
q := wmi.QueryAll(&printJobs, c.logger)
if err := wmi.Query(q, &printJobs); err != nil {
return err
}
groupedPrintJobs := c.groupPrintJobs(printJobs)
for group, count := range groupedPrintJobs {
ch <- prometheus.MustNewConstMetric(
c.printerJobStatus,
prometheus.GaugeValue,
float64(count),
group.printerName,
group.status,
)
}
return nil
}
type PrintJobStatusGroup struct {
printerName string
status string
}
func (c *collector) groupPrintJobs(printJobs []win32_PrintJob) map[PrintJobStatusGroup]int {
groupedPrintJobs := make(map[PrintJobStatusGroup]int)
for _, printJob := range printJobs {
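// Win32_PrintJob.Name has the form "<printer name>, <job id>", so everything before the
// first comma is the printer name.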
printerName := strings.Split(printJob.Name, ",")[0]
if c.printerExcludePattern.MatchString(printerName) ||
!c.printerIncludePattern.MatchString(printerName) {
continue
}
groupedPrintJobs[PrintJobStatusGroup{
printerName: printerName,
status: printJob.Status,
}]++
}
return groupedPrintJobs
}
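// For illustration only (hypothetical printer name, not part of this change set), the
// ENUM pattern above yields one series per status value plus a job counter per printer:
//
//   windows_printer_status{printer="Microsoft Print to PDF",status="Idle"} 1
//   windows_printer_status{printer="Microsoft Print to PDF",status="Offline"} 0
//   windows_printer_job_count{printer="Microsoft Print to PDF"} 0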

View File

@@ -0,0 +1,17 @@
package printer_test
import (
"testing"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/collector/printer"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)
func BenchmarkCollector(b *testing.B) {
// Whitelist is not set in the testing context (kingpin flags are not parsed), which would cause the collector to skip all printers.
printersInclude := ".+"
kingpin.CommandLine.GetArg(printer.FlagPrinterInclude).StringVar(&printersInclude)
testutils.FuncBenchmarkCollector(b, "printer", printer.NewWithFlags)
}

View File

@@ -3,10 +3,13 @@
package process
import (
"errors"
"fmt"
"golang.org/x/sys/windows"
"regexp"
"strconv"
"strings"
"syscall"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
@@ -23,18 +26,21 @@ const (
FlagProcessExclude = "collector.process.exclude"
FlagProcessInclude = "collector.process.include"
FlagEnableWorkerProcess = "collector.process.iis"
FlagEnableReportOwner = "collector.process.report-owner"
)
type Config struct {
ProcessInclude string `yaml:"process_include"`
ProcessExclude string `yaml:"process_exclude"`
EnableWorkerProcess bool `yaml:"enable_iis_worker_process"`
EnableReportOwner bool `yaml:"enable_report_owner"`
}
var ConfigDefaults = Config{
ProcessInclude: ".+",
ProcessExclude: "",
EnableWorkerProcess: false,
EnableReportOwner: false,
}
type collector struct {
@@ -44,6 +50,7 @@ type collector struct {
processExclude *string
enableWorkerProcess *bool
enableReportOwner *bool
StartTime *prometheus.Desc
CPUTimeTotal *prometheus.Desc
@@ -63,6 +70,8 @@ type collector struct {
processIncludePattern *regexp.Regexp
processExcludePattern *regexp.Regexp
lookupCache map[string]string
}
func New(logger log.Logger, config *Config) types.Collector {
@@ -95,6 +104,11 @@ func NewWithFlags(app *kingpin.Application) types.Collector {
FlagEnableWorkerProcess,
"Enable IIS worker process name queries. May cause the collector to leak memory.",
).Default("false").Bool(),
enableReportOwner: app.Flag(
FlagEnableReportOwner,
"Enable reporting of process owner.",
).Default("false").Bool(),
}
return c
}
@@ -116,97 +130,104 @@ func (c *collector) Build() error {
_ = level.Warn(c.logger).Log("msg", "No filters specified for process collector. This will generate a very large number of metrics!")
}
commonLabels := make([]string, 0)
if *c.enableReportOwner {
commonLabels = []string{"owner"}
}
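// When owner reporting is enabled, "owner" becomes the first variable label on every
// descriptor below. The label values assembled in Collect must keep the same positional
// order, because MustNewConstMetric matches values to labels by position.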
c.StartTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "start_time"),
"Time of process start.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.CPUTimeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_time_total"),
"Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user).",
[]string{"process", "process_id", "creating_process_id", "mode"},
append(commonLabels, "process", "process_id", "creating_process_id", "mode"),
nil,
)
c.HandleCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "handles"),
"Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.IOBytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "io_bytes_total"),
"Bytes issued to I/O operations in different modes (read, write, other).",
[]string{"process", "process_id", "creating_process_id", "mode"},
append(commonLabels, "process", "process_id", "creating_process_id", "mode"),
nil,
)
c.IOOperationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "io_operations_total"),
"I/O operations issued in different modes (read, write, other).",
[]string{"process", "process_id", "creating_process_id", "mode"},
append(commonLabels, "process", "process_id", "creating_process_id", "mode"),
nil,
)
c.PageFaultsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_faults_total"),
"Page faults by the threads executing in this process.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.PageFileBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_file_bytes"),
"Current number of bytes this process has used in the paging file(s).",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.PoolBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_bytes"),
"Pool Bytes is the last observed number of bytes in the paged or nonpaged pool.",
[]string{"process", "process_id", "creating_process_id", "pool"},
append(commonLabels, "process", "process_id", "creating_process_id", "pool"),
nil,
)
c.PriorityBase = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "priority_base"),
"Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.PrivateBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "private_bytes"),
"Current number of bytes this process has allocated that cannot be shared with other processes.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.ThreadCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "threads"),
"Number of threads currently active in this process.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.VirtualBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "virtual_bytes"),
"Current size, in bytes, of the virtual address space that the process is using.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.WorkingSetPrivate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_private_bytes"),
"Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.WorkingSetPeak = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_peak_bytes"),
"Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.WorkingSet = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_bytes"),
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process.",
[]string{"process", "process_id", "creating_process_id"},
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.lookupCache = make(map[string]string)
var err error
c.processIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.processInclude))
@@ -270,10 +291,12 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
if *c.enableWorkerProcess {
q_wp := wmi.QueryAll(&dst_wp, c.logger)
if err := wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration"); err != nil {
_ = level.Debug(c.logger).Log(fmt.Sprintf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping\n", err))
_ = level.Debug(c.logger).Log("msg", "Could not query WebAdministration namespace for IIS worker processes", "err", err)
}
}
var owner string
for _, process := range data {
if process.Name == "_Total" ||
c.processExcludePattern.MatchString(process.Name) ||
@@ -294,205 +317,204 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
}
}
labels := make([]string, 0, 4)
if *c.enableReportOwner {
owner, err = c.getProcessOwner(int(process.IDProcess))
if err != nil {
owner = "unknown"
}
labels = []string{owner}
}
labels = append(labels, processName, pid, cpid)
ch <- prometheus.MustNewConstMetric(
c.StartTime,
prometheus.GaugeValue,
process.ElapsedTime,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.HandleCount,
prometheus.GaugeValue,
process.HandleCount,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.CPUTimeTotal,
prometheus.CounterValue,
process.PercentPrivilegedTime,
processName,
pid,
cpid,
"privileged",
append(labels, "privileged")...,
)
ch <- prometheus.MustNewConstMetric(
c.CPUTimeTotal,
prometheus.CounterValue,
process.PercentUserTime,
processName,
pid,
cpid,
"user",
append(labels, "user")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOBytesTotal,
prometheus.CounterValue,
process.IOOtherBytesPerSec,
processName,
pid,
cpid,
"other",
append(labels, "other")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOOperationsTotal,
prometheus.CounterValue,
process.IOOtherOperationsPerSec,
processName,
pid,
cpid,
"other",
append(labels, "other")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOBytesTotal,
prometheus.CounterValue,
process.IOReadBytesPerSec,
processName,
pid,
cpid,
"read",
append(labels, "read")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOOperationsTotal,
prometheus.CounterValue,
process.IOReadOperationsPerSec,
processName,
pid,
cpid,
"read",
append(labels, "read")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOBytesTotal,
prometheus.CounterValue,
process.IOWriteBytesPerSec,
processName,
pid,
cpid,
"write",
append(labels, "write")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOOperationsTotal,
prometheus.CounterValue,
process.IOWriteOperationsPerSec,
processName,
pid,
cpid,
"write",
append(labels, "write")...,
)
ch <- prometheus.MustNewConstMetric(
c.PageFaultsTotal,
prometheus.CounterValue,
process.PageFaultsPerSec,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.PageFileBytes,
prometheus.GaugeValue,
process.PageFileBytes,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.PoolBytes,
prometheus.GaugeValue,
process.PoolNonpagedBytes,
processName,
pid,
cpid,
"nonpaged",
append(labels, "nonpaged")...,
)
ch <- prometheus.MustNewConstMetric(
c.PoolBytes,
prometheus.GaugeValue,
process.PoolPagedBytes,
processName,
pid,
cpid,
"paged",
append(labels, "paged")...,
)
ch <- prometheus.MustNewConstMetric(
c.PriorityBase,
prometheus.GaugeValue,
process.PriorityBase,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.PrivateBytes,
prometheus.GaugeValue,
process.PrivateBytes,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.ThreadCount,
prometheus.GaugeValue,
process.ThreadCount,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.VirtualBytes,
prometheus.GaugeValue,
process.VirtualBytes,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSetPrivate,
prometheus.GaugeValue,
process.WorkingSetPrivate,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSetPeak,
prometheus.GaugeValue,
process.WorkingSetPeak,
processName,
pid,
cpid,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSet,
prometheus.GaugeValue,
process.WorkingSet,
processName,
pid,
cpid,
labels...,
)
}
return nil
}
// ref: https://github.com/microsoft/hcsshim/blob/8beabacfc2d21767a07c20f8dd5f9f3932dbf305/internal/uvm/stats.go#L25
func (c *collector) getProcessOwner(pid int) (string, error) {
p, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid))
if errors.Is(err, syscall.Errno(0x57)) { // invalid parameter, for PIDs that don't exist
return "", errors.New("process not found")
}
if err != nil {
return "", fmt.Errorf("OpenProcess: %T %w", err, err)
}
defer windows.Close(p)
var tok windows.Token
if err = windows.OpenProcessToken(p, windows.TOKEN_QUERY, &tok); err != nil {
return "", fmt.Errorf("OpenProcessToken: %w", err)
}
tokenUser, err := tok.GetTokenUser()
if err != nil {
return "", fmt.Errorf("GetTokenUser: %w", err)
}
sid := tokenUser.User.Sid.String()
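// LookupAccount may be slow (for domain SIDs it can query a domain controller), so cache
// the SID-to-account resolution for the lifetime of the collector.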
if owner, ok := c.lookupCache[sid]; ok {
return owner, nil
}
account, domain, _, err := tokenUser.User.Sid.LookupAccount("")
if err != nil {
c.lookupCache[sid] = sid
} else {
c.lookupCache[sid] = fmt.Sprintf(`%s\%s`, account, domain)
}
return c.lookupCache[sid], nil
}

View File

@@ -39,6 +39,9 @@ type collector struct {
TotalSentBytes *prometheus.Desc
UDPPacketsReceivedPersec *prometheus.Desc
UDPPacketsSentPersec *prometheus.Desc
FECRate *prometheus.Desc
LossRate *prometheus.Desc
RetransmissionRate *prometheus.Desc
// gfx
AverageEncodingTime *prometheus.Desc
@@ -69,7 +72,7 @@ func (c *collector) SetLogger(logger log.Logger) {
}
func (c *collector) GetPerfCounter() ([]string, error) {
return []string{"RemoteFX Network"}, nil
return []string{"RemoteFX Network", "RemoteFX Graphics"}, nil
}
func (c *collector) Build() error {
@@ -134,6 +137,24 @@ func (c *collector) Build() error {
[]string{"session_name"},
nil,
)
c.FECRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_fec_rate"),
"Forward Error Correction (FEC) percentage",
[]string{"session_name"},
nil,
)
c.LossRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_loss_rate"),
"Loss percentage",
[]string{"session_name"},
nil,
)
c.RetransmissionRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_retransmission_rate"),
"Percentage of packets that have been retransmitted",
[]string{"session_name"},
nil,
)
// gfx
c.AverageEncodingTime = prometheus.NewDesc(
@@ -184,12 +205,12 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectRemoteFXNetworkCount(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
if err := c.collectRemoteFXNetworkCount(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session count metrics", "err", err)
return err
}
if desc, err := c.collectRemoteFXGraphicsCounters(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
if err := c.collectRemoteFXGraphicsCounters(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session count metrics", "err", err)
return err
}
return nil
@@ -207,18 +228,21 @@ type perflibRemoteFxNetwork struct {
TotalSentBytes float64 `perflib:"Total Sent Bytes"`
UDPPacketsReceivedPersec float64 `perflib:"UDP Packets Received/sec"`
UDPPacketsSentPersec float64 `perflib:"UDP Packets Sent/sec"`
FECRate float64 `perflib:"Forward Error Correction (FEC) percentage"`
LossRate float64 `perflib:"Loss percentage"`
RetransmissionRate float64 `perflib:"Percentage of packets that have been retransmitted"`
}
func (c *collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibRemoteFxNetwork, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Network"], &dst, c.logger)
if err != nil {
return nil, err
return err
}
for _, d := range dst {
// only collect metrics for remote named sessions
n := strings.ToLower(d.Name)
n := strings.ToLower(normalizeSessionName(d.Name))
if n == "" || n == "services" || n == "console" {
continue
}
@@ -226,64 +250,84 @@ func (c *collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, ch cha
c.BaseTCPRTT,
prometheus.GaugeValue,
utils.MilliSecToSec(d.BaseTCPRTT),
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.BaseUDPRTT,
prometheus.GaugeValue,
utils.MilliSecToSec(d.BaseUDPRTT),
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.CurrentTCPBandwidth,
prometheus.GaugeValue,
(d.CurrentTCPBandwidth*1000)/8,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.CurrentTCPRTT,
prometheus.GaugeValue,
utils.MilliSecToSec(d.CurrentTCPRTT),
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.CurrentUDPBandwidth,
prometheus.GaugeValue,
(d.CurrentUDPBandwidth*1000)/8,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.CurrentUDPRTT,
prometheus.GaugeValue,
utils.MilliSecToSec(d.CurrentUDPRTT),
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.TotalReceivedBytes,
prometheus.CounterValue,
d.TotalReceivedBytes,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.TotalSentBytes,
prometheus.CounterValue,
d.TotalSentBytes,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.UDPPacketsReceivedPersec,
prometheus.CounterValue,
d.UDPPacketsReceivedPersec,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.UDPPacketsSentPersec,
prometheus.CounterValue,
d.UDPPacketsSentPersec,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.FECRate,
prometheus.GaugeValue,
d.FECRate,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.LossRate,
prometheus.GaugeValue,
d.LossRate,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.RetransmissionRate,
prometheus.GaugeValue,
d.RetransmissionRate,
normalizeSessionName(d.Name),
)
}
return nil, nil
return nil
}
type perflibRemoteFxGraphics struct {
@@ -299,16 +343,16 @@ type perflibRemoteFxGraphics struct {
SourceFramesPerSecond float64 `perflib:"Source Frames/Second"`
}
func (c *collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibRemoteFxGraphics, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Graphics"], &dst, c.logger)
if err != nil {
return nil, err
return err
}
for _, d := range dst {
// only collect metrics for remote named sessions
n := strings.ToLower(d.Name)
n := strings.ToLower(normalizeSessionName(d.Name))
if n == "" || n == "services" || n == "console" {
continue
}
@@ -316,60 +360,65 @@ func (c *collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, ch
c.AverageEncodingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.AverageEncodingTime),
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.FrameQuality,
prometheus.GaugeValue,
d.FrameQuality,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientClientResources,
d.Name,
normalizeSessionName(d.Name),
"client",
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientNetworkResources,
d.Name,
normalizeSessionName(d.Name),
"network",
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientServerResources,
d.Name,
normalizeSessionName(d.Name),
"server",
)
ch <- prometheus.MustNewConstMetric(
c.GraphicsCompressionratio,
prometheus.GaugeValue,
d.GraphicsCompressionratio,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.InputFramesPerSecond,
prometheus.CounterValue,
d.InputFramesPerSecond,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.OutputFramesPerSecond,
prometheus.CounterValue,
d.OutputFramesPerSecond,
d.Name,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.SourceFramesPerSecond,
prometheus.CounterValue,
d.SourceFramesPerSecond,
d.Name,
normalizeSessionName(d.Name),
)
}
return nil, nil
return nil
}
// normalizeSessionName ensures that the session name is the same between the WTS API and performance counters
func normalizeSessionName(sessionName string) string {
return strings.Replace(sessionName, "RDP-tcp", "RDP-Tcp", 1)
}

View File

@@ -3,17 +3,19 @@
package scheduled_task
import (
"errors"
"fmt"
"regexp"
"runtime"
"strings"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/go-ole/go-ole"
"github.com/go-ole/go-ole/oleutil"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
)
@@ -63,7 +65,6 @@ const (
TASK_RESULT_SUCCESS TaskResult = 0x0
)
// RegisteredTask ...
type ScheduledTask struct {
Name string
Path string
@@ -117,18 +118,6 @@ func (c *collector) GetPerfCounter() ([]string, error) {
}
func (c *collector) Build() error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
if err != nil {
code := err.(*ole.OleError).Code()
if code != ole.S_OK && code != S_FALSE {
return err
}
}
defer ole.CoUninitialize()
c.LastResult = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "last_result"),
"The result that was returned the last time the registered task was run",
@@ -150,6 +139,8 @@ func (c *collector) Build() error {
nil,
)
var err error
c.taskIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.taskInclude))
if err != nil {
return err
@@ -164,8 +155,8 @@ func (c *collector) Build() error {
}
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting user metrics", "err", err)
return err
}
@@ -174,10 +165,10 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
var TASK_STATES = []string{"disabled", "queued", "ready", "running", "unknown"}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
scheduledTasks, err := getScheduledTasks()
if err != nil {
return nil, err
return err
}
for _, task := range scheduledTasks {
@@ -222,7 +213,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
}
}
return nil, nil
return nil
}
const SCHEDULED_TASK_PROGRAM_ID = "Schedule.Service.1"
@@ -231,6 +222,21 @@ const SCHEDULED_TASK_PROGRAM_ID = "Schedule.Service.1"
const S_FALSE = 0x00000001
func getScheduledTasks() (scheduledTasks ScheduledTasks, err error) {
// The only way to run WMI queries in parallel while being thread-safe is to
// ensure the CoInitialize[Ex]() call is bound to its current OS thread.
// Otherwise, attempting to initialize and run parallel queries across
// goroutines will result in protected memory errors.
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
var oleCode *ole.OleError
if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != S_FALSE {
return nil, err
}
}
defer ole.CoUninitialize()
schedClassID, err := ole.ClassIDFrom(SCHEDULED_TASK_PROGRAM_ID)
if err != nil {
return scheduledTasks, err
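
The thread-affinity comment in getScheduledTasks above is the important constraint: COM state is per OS thread, so each goroutine that touches COM has to pin itself with runtime.LockOSThread, initialize COM there, and uninitialize before unpinning. A minimal, self-contained sketch of that pattern as a reusable helper (the withCOM name and the standalone main are illustrative only, not part of windows_exporter):

//go:build windows
package main
import (
	"errors"
	"runtime"
	"github.com/go-ole/go-ole"
)
// S_FALSE means COM was already initialized on this thread; that is not an error for us.
const S_FALSE = 0x00000001
// withCOM runs fn on a locked OS thread with COM initialized in the multithreaded apartment.
func withCOM(fn func() error) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		var oleErr *ole.OleError
		// Only treat real failures as fatal; S_OK and S_FALSE both mean COM is usable here.
		if errors.As(err, &oleErr) && oleErr.Code() != ole.S_OK && oleErr.Code() != S_FALSE {
			return err
		}
	}
	defer ole.CoUninitialize()
	return fn()
}
func main() {
	if err := withCOM(func() error {
		// COM-dependent work (e.g. instantiating Schedule.Service.1) would go here.
		return nil
	}); err != nil {
		panic(err)
	}
}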

View File

@@ -3,10 +3,12 @@
package service
import (
"errors"
"fmt"
"strconv"
"strings"
"syscall"
"unsafe"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
@@ -23,16 +25,19 @@ const (
Name = "service"
FlagServiceWhereClause = "collector.service.services-where"
FlagServiceUseAPI = "collector.service.use-api"
FlagServiceCollectorV2 = "collector.service.v2"
)
type Config struct {
ServiceWhereClause string `yaml:"service_where_clause"`
UseAPI bool `yaml:"use_api"`
V2 bool `yaml:"v2"`
}
var ConfigDefaults = Config{
ServiceWhereClause: "",
UseAPI: false,
V2: false,
}
// A collector is a Prometheus collector for WMI Win32_Service metrics
@@ -41,13 +46,13 @@ type collector struct {
serviceWhereClause *string
useAPI *bool
v2 *bool
Information *prometheus.Desc
State *prometheus.Desc
StartMode *prometheus.Desc
Status *prometheus.Desc
queryWhereClause string
StateV2 *prometheus.Desc
}
func New(logger log.Logger, config *Config) types.Collector {
@@ -73,6 +78,10 @@ func NewWithFlags(app *kingpin.Application) types.Collector {
FlagServiceUseAPI,
"Use API calls to collect service data instead of WMI. Flag 'collector.service.services-where' won't be effective.",
).Default(strconv.FormatBool(ConfigDefaults.UseAPI)).Bool(),
v2: app.Flag(
FlagServiceCollectorV2,
"Enable V2 service collector. This collector can services state much more efficiently, can't provide general service information.",
).Default(strconv.FormatBool(ConfigDefaults.V2)).Bool(),
}
}
@@ -120,25 +129,37 @@ func (c *collector) Build() error {
[]string{"name", "status"},
nil,
)
c.queryWhereClause = *c.serviceWhereClause
c.StateV2 = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"The state of the service (State)",
[]string{"name", "display_name", "status"},
nil,
)
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if *c.useAPI {
if err := c.collectAPI(ch); err != nil {
var err error
switch {
case *c.useAPI:
if err = c.collectAPI(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting API service metrics:", "err", err)
return err
}
} else {
if err := c.collectWMI(ch); err != nil {
case *c.v2:
if err = c.collectAPIV2(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting API service metrics:", "err", err)
}
default:
if err = c.collectWMI(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting WMI service metrics:", "err", err)
return err
}
}
return nil
return err
}
// Win32_Service docs:
@@ -164,7 +185,7 @@ var (
"paused",
"unknown",
}
apiStateValues = map[uint]string{
apiStateValues = map[uint32]string{
windows.SERVICE_CONTINUE_PENDING: "continue pending",
windows.SERVICE_PAUSE_PENDING: "pause pending",
windows.SERVICE_PAUSED: "paused",
@@ -205,12 +226,12 @@ var (
func (c *collector) collectWMI(ch chan<- prometheus.Metric) error {
var dst []Win32_Service
q := wmi.QueryAllWhere(&dst, c.queryWhereClause, c.logger)
q := wmi.QueryAllWhere(&dst, *c.serviceWhereClause, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return err
}
for _, service := range dst {
pid := fmt.Sprintf("%d", uint64(service.ProcessId))
pid := strconv.FormatUint(uint64(service.ProcessId), 10)
runAs := ""
if service.StartName != nil {
@@ -319,7 +340,7 @@ func (c *collector) collectAPI(ch chan<- prometheus.Metric) error {
return
}
pid := fmt.Sprintf("%d", uint64(serviceStatus.ProcessId))
pid := strconv.FormatUint(uint64(serviceStatus.ProcessId), 10)
ch <- prometheus.MustNewConstMetric(
c.Information,
@@ -333,7 +354,7 @@ func (c *collector) collectAPI(ch chan<- prometheus.Metric) error {
for _, state := range apiStateValues {
isCurrentState := 0.0
if state == apiStateValues[uint(serviceStatus.State)] {
if state == apiStateValues[uint32(serviceStatus.State)] {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
@@ -362,3 +383,92 @@ func (c *collector) collectAPI(ch chan<- prometheus.Metric) error {
}
return nil
}
func (c *collector) collectAPIV2(ch chan<- prometheus.Metric) error {
services, err := c.queryAllServiceStates()
if err != nil {
_ = level.Warn(c.logger).Log("msg", "Failed to query services", "err", err)
return err
}
if services == nil {
_ = level.Warn(c.logger).Log("msg", "No services queried")
return nil
}
var isCurrentState float64
for _, svc := range services {
for state, stateValue := range apiStateValues {
isCurrentState = 0.0
if state == svc.ServiceStatusProcess.CurrentState {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.StateV2,
prometheus.GaugeValue,
isCurrentState,
windows.UTF16PtrToString(svc.ServiceName),
windows.UTF16PtrToString(svc.DisplayName),
stateValue,
)
}
}
return nil
}
// queryAllServiceStates returns all service states of the current Windows system
// This is realized by asking the Service Manager directly.
//
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
//
// Source: https://github.com/DataDog/datadog-agent/blob/afbd8b6c87939c92610c654cb07fdfd439e4fb27/pkg/util/winutil/scmmonitor.go#L61-L96
func (c *collector) queryAllServiceStates() ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) {
// EnumServicesStatusEx requires only SC_MANAGER_ENUMERATE_SERVICE.
h, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_ENUMERATE_SERVICE)
if err != nil {
return nil, fmt.Errorf("failed to open scm: %w", err)
}
m := &mgr.Mgr{Handle: h}
defer func() {
if err := m.Disconnect(); err != nil {
_ = level.Warn(c.logger).Log("msg", "Failed to disconnect from scm", "err", err)
}
}()
var bytesNeeded, servicesReturned uint32
var buf []byte
for {
var p *byte
if len(buf) > 0 {
p = &buf[0]
}
err = windows.EnumServicesStatusEx(m.Handle, windows.SC_ENUM_PROCESS_INFO,
windows.SERVICE_WIN32, windows.SERVICE_STATE_ALL,
p, uint32(len(buf)), &bytesNeeded, &servicesReturned, nil, nil)
if err == nil {
break
}
if !errors.Is(err, windows.ERROR_MORE_DATA) {
return nil, fmt.Errorf("failed to enum services %w", err)
}
if bytesNeeded <= uint32(len(buf)) {
return nil, err
}
buf = make([]byte, bytesNeeded)
}
if servicesReturned == 0 {
return nil, nil
}
services := unsafe.Slice((*windows.ENUM_SERVICE_STATUS_PROCESS)(unsafe.Pointer(&buf[0])), servicesReturned)
return services, nil
}
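
The unsafe.Slice call above is the standard way to view the raw byte buffer filled by EnumServicesStatusEx as a typed slice without copying. A tiny, self-contained sketch of the same reinterpretation (the record type and values are made up for illustration; the real code uses windows.ENUM_SERVICE_STATUS_PROCESS):

package main
import (
	"fmt"
	"unsafe"
)
// record stands in for a fixed-layout struct filled by a C-style API.
type record struct {
	ID    uint32
	Value uint32
}
func main() {
	// Pretend a syscall wrote two records into this byte buffer.
	size := unsafe.Sizeof(record{})
	buf := make([]byte, 2*size)
	*(*record)(unsafe.Pointer(&buf[0])) = record{ID: 1, Value: 10}
	*(*record)(unsafe.Pointer(&buf[size])) = record{ID: 2, Value: 20}
	// Reinterpret the buffer as []record without copying, like the
	// ENUM_SERVICE_STATUS_PROCESS slice above. The buffer must stay alive
	// (and be properly aligned) for as long as the slice is used.
	records := unsafe.Slice((*record)(unsafe.Pointer(&buf[0])), 2)
	fmt.Println(records) // [{1 10} {2 20}]
}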

View File

@@ -0,0 +1,447 @@
//go:build windows
package smbclient
import (
"fmt"
"os"
"slices"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "smbclient"
FlagSmbClientListAllCollectors = "collectors.smbclient.list"
FlagSmbClientCollectorsEnabled = "collectors.smbclient.enabled"
)
type Config struct {
CollectorsEnabled string `yaml:"collectors_enabled"`
}
var ConfigDefaults = Config{
CollectorsEnabled: "",
}
type collector struct {
logger log.Logger
smbclientListAllCollectors *bool
smbclientCollectorsEnabled *string
ReadRequestQueueSecsTotal *prometheus.Desc
ReadBytesTotal *prometheus.Desc
ReadsTotal *prometheus.Desc
ReadBytesTransmittedViaSMBDirectTotal *prometheus.Desc
ReadRequestsTransmittedViaSMBDirectTotal *prometheus.Desc
TurboIOReadsTotal *prometheus.Desc
ReadSecsTotal *prometheus.Desc
WriteRequestQueueSecsTotal *prometheus.Desc
WriteBytesTotal *prometheus.Desc
WritesTotal *prometheus.Desc
WriteBytesTransmittedViaSMBDirectTotal *prometheus.Desc
WriteRequestsTransmittedViaSMBDirectTotal *prometheus.Desc
TurboIOWritesTotal *prometheus.Desc
WriteSecsTotal *prometheus.Desc
RequestQueueSecsTotal *prometheus.Desc
RequestSecs *prometheus.Desc
CreditStallsTotal *prometheus.Desc
CurrentDataQueued *prometheus.Desc
DataBytesTotal *prometheus.Desc
DataRequestsTotal *prometheus.Desc
MetadataRequestsTotal *prometheus.Desc
enabledCollectors []string
}
// All available collector functions
var smbclientAllCollectorNames = []string{
"ClientShares",
}
func New(logger log.Logger, config *Config) types.Collector {
if config == nil {
config = &ConfigDefaults
}
smbclientListAllCollectors := false
c := &collector{
smbclientCollectorsEnabled: &config.CollectorsEnabled,
smbclientListAllCollectors: &smbclientListAllCollectors,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
return &collector{
smbclientListAllCollectors: app.Flag(
FlagSmbClientListAllCollectors,
"List the collectors along with their perflib object name/ids",
).Bool(),
smbclientCollectorsEnabled: app.Flag(
FlagSmbClientCollectorsEnabled,
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
).Default(ConfigDefaults.CollectorsEnabled).String(),
}
}
func (c *collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
return []string{
"SMB Client Shares",
}, nil
}
func (c *collector) Build() error {
// desc creates a new prometheus description
desc := func(metricName string, description string, labels []string) *prometheus.Desc {
return prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, "smbclient", metricName),
description,
labels,
nil,
)
}
c.RequestQueueSecsTotal = desc("data_queue_seconds_total",
"Seconds requests waited on queue on this share",
[]string{"server", "share"},
)
c.ReadRequestQueueSecsTotal = desc("read_queue_seconds_total",
"Seconds read requests waited on queue on this share",
[]string{"server", "share"},
)
c.WriteRequestQueueSecsTotal = desc("write_queue_seconds_total",
"Seconds write requests waited on queue on this share",
[]string{"server", "share"},
)
c.RequestSecs = desc("request_seconds_total",
"Seconds waiting for requests on this share",
[]string{"server", "share"},
)
c.CreditStallsTotal = desc("stalls_total",
"The number of requests delayed based on insufficient credits on this share",
[]string{"server", "share"},
)
c.CurrentDataQueued = desc("requests_queued",
"The point in time number of requests outstanding on this share",
[]string{"server", "share"},
)
c.DataBytesTotal = desc("data_bytes_total",
"The bytes read or written on this share",
[]string{"server", "share"},
)
c.DataRequestsTotal = desc("requests_total",
"The requests on this share",
[]string{"server", "share"},
)
c.MetadataRequestsTotal = desc("metadata_requests_total",
"The metadata requests on this share",
[]string{"server", "share"},
)
c.ReadBytesTransmittedViaSMBDirectTotal = desc("read_bytes_via_smbdirect_total",
"The bytes read from this share via RDMA direct placement",
[]string{"server", "share"},
)
c.ReadBytesTotal = desc("read_bytes_total",
"The bytes read on this share",
[]string{"server", "share"},
)
c.ReadRequestsTransmittedViaSMBDirectTotal = desc("read_requests_via_smbdirect_total",
"The read requests on this share via RDMA direct placement",
[]string{"server", "share"},
)
c.ReadsTotal = desc("read_requests_total",
"The read requests on this share",
[]string{"server", "share"},
)
c.TurboIOReadsTotal = desc("turbo_io_reads_total",
"The read requests that go through Turbo I/O",
[]string{"server", "share"},
)
c.TurboIOWritesTotal = desc("turbo_io_writes_total",
"The write requests that go through Turbo I/O",
[]string{"server", "share"},
)
c.WriteBytesTransmittedViaSMBDirectTotal = desc("write_bytes_via_smbdirect_total",
"The written bytes to this share via RDMA direct placement",
[]string{"server", "share"},
)
c.WriteBytesTotal = desc("write_bytes_total",
"The bytes written on this share",
[]string{"server", "share"},
)
c.WriteRequestsTransmittedViaSMBDirectTotal = desc("write_requests_via_smbdirect_total",
"The write requests to this share via RDMA direct placement",
[]string{"server", "share"},
)
c.WritesTotal = desc("write_requests_total",
"The write requests on this share",
[]string{"server", "share"},
)
c.ReadSecsTotal = desc("read_seconds_total",
"Seconds waiting for read requests on this share",
[]string{"server", "share"},
)
c.WriteSecsTotal = desc("write_seconds_total",
"Seconds waiting for write requests on this share",
[]string{"server", "share"},
)
c.enabledCollectors = make([]string, 0, len(smbclientAllCollectorNames))
collectorDesc := map[string]string{
"ClientShares": "SMB Client Shares",
}
if *c.smbclientListAllCollectors {
fmt.Printf("%-32s %-32s\n", "Collector Name", "Perflib Object")
for _, cname := range smbclientAllCollectorNames {
fmt.Printf("%-32s %-32s\n", cname, collectorDesc[cname])
}
os.Exit(0)
}
if *c.smbclientCollectorsEnabled == "" {
for _, collectorName := range smbclientAllCollectorNames {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
}
} else {
for _, collectorName := range strings.Split(*c.smbclientCollectorsEnabled, ",") {
if slices.Contains(smbclientAllCollectorNames, collectorName) {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
} else {
return fmt.Errorf("unknown smbclient collector: %s", collectorName)
}
}
}
return nil
}
// Collect collects smb client metrics and sends them to prometheus
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
collectorFuncs := map[string]func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error{
"ClientShares": c.collectClientShares,
}
for _, collectorName := range c.enabledCollectors {
if err := collectorFuncs[collectorName](ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "Error in "+collectorName, "err", err)
return err
}
}
return nil
}
// Perflib: SMB Client Shares
type perflibClientShares struct {
Name string
AvgDataQueueLength float64 `perflib:"Avg. Data Queue Length"`
AvgReadQueueLength float64 `perflib:"Avg. Read Queue Length"`
AvgSecPerRead float64 `perflib:"Avg. sec/Read"`
AvgSecPerWrite float64 `perflib:"Avg. sec/Write"`
AvgSecPerDataRequest float64 `perflib:"Avg. sec/Data Request"`
AvgWriteQueueLength float64 `perflib:"Avg. Write Queue Length"`
CreditStallsPerSec float64 `perflib:"Credit Stalls/sec"`
CurrentDataQueueLength float64 `perflib:"Current Data Queue Length"`
DataBytesPerSec float64 `perflib:"Data Bytes/sec"`
DataRequestsPerSec float64 `perflib:"Data Requests/sec"`
MetadataRequestsPerSec float64 `perflib:"Metadata Requests/sec"`
ReadBytesTransmittedViaSMBDirectPerSec float64 `perflib:"Read Bytes transmitted via SMB Direct/sec"`
ReadBytesPerSec float64 `perflib:"Read Bytes/sec"`
ReadRequestsTransmittedViaSMBDirectPerSec float64 `perflib:"Read Requests transmitted via SMB Direct/sec"`
ReadRequestsPerSec float64 `perflib:"Read Requests/sec"`
TurboIOReadsPerSec float64 `perflib:"Turbo I/O Reads/sec"`
TurboIOWritesPerSec float64 `perflib:"Turbo I/O Writes/sec"`
WriteBytesTransmittedViaSMBDirectPerSec float64 `perflib:"Write Bytes transmitted via SMB Direct/sec"`
WriteBytesPerSec float64 `perflib:"Write Bytes/sec"`
WriteRequestsTransmittedViaSMBDirectPerSec float64 `perflib:"Write Requests transmitted via SMB Direct/sec"`
WriteRequestsPerSec float64 `perflib:"Write Requests/sec"`
}
func (c *collector) collectClientShares(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibClientShares
if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Client Shares"], &data, c.logger); err != nil {
return err
}
for _, instance := range data {
if instance.Name == "_Total" {
continue
}
parsed := strings.FieldsFunc(instance.Name, func(r rune) bool { return r == '\\' })
serverValue := parsed[0]
shareValue := parsed[1]
// Request time spent on queue. Convert from ticks to seconds.
ch <- prometheus.MustNewConstMetric(
c.RequestQueueSecsTotal,
prometheus.CounterValue,
instance.AvgDataQueueLength*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
// Read time spent on queue. Convert from ticks to seconds.
ch <- prometheus.MustNewConstMetric(
c.ReadRequestQueueSecsTotal,
prometheus.CounterValue,
instance.AvgReadQueueLength*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadSecsTotal,
prometheus.CounterValue,
instance.AvgSecPerRead*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.WriteSecsTotal,
prometheus.CounterValue,
instance.AvgSecPerWrite*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.RequestSecs,
prometheus.CounterValue,
instance.AvgSecPerDataRequest*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
// Write time spent on queue. Convert from ticks to seconds.
ch <- prometheus.MustNewConstMetric(
c.WriteRequestQueueSecsTotal,
prometheus.CounterValue,
instance.AvgWriteQueueLength*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.CreditStallsTotal,
prometheus.CounterValue,
instance.CreditStallsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentDataQueued,
prometheus.GaugeValue,
instance.CurrentDataQueueLength,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.DataBytesTotal,
prometheus.CounterValue,
instance.DataBytesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.DataRequestsTotal,
prometheus.CounterValue,
instance.DataRequestsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.MetadataRequestsTotal,
prometheus.CounterValue,
instance.MetadataRequestsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadBytesTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
instance.ReadBytesTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadBytesTotal,
prometheus.CounterValue,
instance.ReadBytesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadRequestsTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
instance.ReadRequestsTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadsTotal,
prometheus.CounterValue,
instance.ReadRequestsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.TurboIOReadsTotal,
prometheus.CounterValue,
instance.TurboIOReadsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.TurboIOWritesTotal,
prometheus.CounterValue,
instance.TurboIOWritesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.WriteBytesTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
instance.WriteBytesTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.WriteBytesTotal,
prometheus.CounterValue,
instance.WriteBytesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.WriteRequestsTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
instance.WriteRequestsTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.WritesTotal,
prometheus.CounterValue,
instance.WriteRequestsPerSec,
serverValue, shareValue,
)
}
return nil
}
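
The Avg.* queue-length and sec/Read counters above arrive as raw performance-counter ticks and are scaled with perflib.TicksToSecondScaleFactor. Assuming the usual 100 ns tick size, i.e. a factor of 1/1e7 (an assumption here; the perflib package holds the authoritative value), the conversion is a plain multiplication:

package main
import "fmt"
// ticksToSecondScaleFactor assumes 100-nanosecond performance-counter ticks.
const ticksToSecondScaleFactor = 1.0 / 1e7
func main() {
	rawTicks := 2_500_000_000.0 // cumulative raw value of e.g. "Avg. sec/Read"
	fmt.Printf("%.1f seconds\n", rawTicks*ticksToSecondScaleFactor) // 250.0 seconds
}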

View File

@@ -0,0 +1,12 @@
package smbclient_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/smbclient"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, smbclient.Name, smbclient.NewWithFlags)
}

View File

@@ -399,8 +399,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting smtp metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting smtp metrics", "err", err)
return err
}
return nil
@@ -454,10 +454,10 @@ type PerflibSMTPServer struct {
RoutingTableLookupsTotal float64 `perflib:"Routing Table Lookups Total"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibSMTPServer
if err := perflib.UnmarshalObject(ctx.PerfObjects["SMTP Server"], &dst, c.logger); err != nil {
return nil, err
return err
}
for _, server := range dst {
@@ -755,5 +755,5 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
)
}
return nil, nil
return nil
}

View File

@@ -94,8 +94,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting system metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting system metrics", "err", err)
return err
}
return nil
@@ -112,10 +112,10 @@ type system struct {
Threads float64 `perflib:"Threads"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []system
if err := perflib.UnmarshalObject(ctx.PerfObjects["System"], &dst, c.logger); err != nil {
return nil, err
return err
}
ch <- prometheus.MustNewConstMetric(
@@ -148,5 +148,5 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
prometheus.GaugeValue,
dst[0].Threads,
)
return nil, nil
return nil
}

View File

@@ -115,8 +115,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting tcp metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting tcp metrics", "err", err)
return err
}
return nil
@@ -194,12 +194,12 @@ func writeTCPCounters(metrics tcp, labels []string, c *collector, ch chan<- prom
)
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []tcp
// TCPv4 counters
if err := perflib.UnmarshalObject(ctx.PerfObjects["TCPv4"], &dst, c.logger); err != nil {
return nil, err
return err
}
if len(dst) != 0 {
writeTCPCounters(dst[0], []string{"ipv4"}, c, ch)
@@ -207,11 +207,11 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
// TCPv6 counters
if err := perflib.UnmarshalObject(ctx.PerfObjects["TCPv6"], &dst, c.logger); err != nil {
return nil, err
return err
}
if len(dst) != 0 {
writeTCPCounters(dst[0], []string{"ipv6"}, c, ch)
}
return nil, nil
return nil
}

View File

@@ -326,24 +326,24 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectAudio(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting teradici session audio metrics", "desc", desc, "err", err)
if err := c.collectAudio(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session audio metrics", "err", err)
return err
}
if desc, err := c.collectGeneral(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting teradici session general metrics", "desc", desc, "err", err)
if err := c.collectGeneral(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session general metrics", "err", err)
return err
}
if desc, err := c.collectImaging(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting teradici session imaging metrics", "desc", desc, "err", err)
if err := c.collectImaging(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session imaging metrics", "err", err)
return err
}
if desc, err := c.collectNetwork(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting teradici session network metrics", "desc", desc, "err", err)
if err := c.collectNetwork(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session network metrics", "err", err)
return err
}
if desc, err := c.collectUsb(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting teradici session USB metrics", "desc", desc, "err", err)
if err := c.collectUsb(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session USB metrics", "err", err)
return err
}
return nil
@@ -401,14 +401,14 @@ type win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics struct {
USBTXBWkbitPersec uint64
}
func (c *collector) collectAudio(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectAudio(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionAudioStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -441,17 +441,17 @@ func (c *collector) collectAudio(ch chan<- prometheus.Metric) (*prometheus.Desc,
float64(dst[0].AudioTXBWLimitkbitPersec),
)
return nil, nil
return nil
}
func (c *collector) collectGeneral(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectGeneral(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionGeneralStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -496,17 +496,17 @@ func (c *collector) collectGeneral(ch chan<- prometheus.Metric) (*prometheus.Des
float64(dst[0].TXPacketsLost),
)
return nil, nil
return nil
}
func (c *collector) collectImaging(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectImaging(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionImagingStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -575,17 +575,17 @@ func (c *collector) collectImaging(ch chan<- prometheus.Metric) (*prometheus.Des
float64(dst[0].ImagingTXBWkbitPersec),
)
return nil, nil
return nil
}
func (c *collector) collectNetwork(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectNetwork(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -648,17 +648,17 @@ func (c *collector) collectNetwork(ch chan<- prometheus.Metric) (*prometheus.Des
float64(dst[0].TXPacketLossPercent_Base),
)
return nil, nil
return nil
}
func (c *collector) collectUsb(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectUsb(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -685,5 +685,5 @@ func (c *collector) collectUsb(ch chan<- prometheus.Metric) (*prometheus.Desc, e
float64(dst[0].USBTXBWkbitPersec),
)
return nil, nil
return nil
}

View File

@@ -4,11 +4,14 @@ package terminal_services
import (
"errors"
"fmt"
"strings"
"syscall"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/headers/wtsapi32"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
@@ -52,15 +55,16 @@ type collector struct {
connectionBrokerEnabled bool
hServer syscall.Handle
SessionInfo *prometheus.Desc
LocalSessionCount *prometheus.Desc
ConnectionBrokerPerformance *prometheus.Desc
HandleCount *prometheus.Desc
PageFaultsPersec *prometheus.Desc
PageFileBytes *prometheus.Desc
PageFileBytesPeak *prometheus.Desc
PercentPrivilegedTime *prometheus.Desc
PercentProcessorTime *prometheus.Desc
PercentUserTime *prometheus.Desc
PercentCPUTime *prometheus.Desc
PoolNonpagedBytes *prometheus.Desc
PoolPagedBytes *prometheus.Desc
PrivateBytes *prometheus.Desc
@@ -91,7 +95,6 @@ func (c *collector) SetLogger(logger log.Logger) {
func (c *collector) GetPerfCounter() ([]string, error) {
return []string{
"Terminal Services",
"Terminal Services Session",
"Remote Desktop Connection Broker Counterset",
}, nil
@@ -100,10 +103,10 @@ func (c *collector) GetPerfCounter() ([]string, error) {
func (c *collector) Build() error {
c.connectionBrokerEnabled = isConnectionBrokerServer(c.logger)
c.LocalSessionCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "local_session_count"),
"Number of Terminal Services sessions",
[]string{"session"},
c.SessionInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "session_info"),
"Terminal Services sessions info",
[]string{"session_name", "user", "host", "state"},
nil,
)
c.ConnectionBrokerPerformance = prometheus.NewDesc(
@@ -136,22 +139,10 @@ func (c *collector) Build() error {
[]string{"session_name"},
nil,
)
c.PercentPrivilegedTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "privileged_time_seconds_total"),
"Total elapsed time that the threads of the process have spent executing code in privileged mode.",
[]string{"session_name"},
nil,
)
c.PercentProcessorTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "processor_time_seconds_total"),
"Total elapsed time that all of the threads of this process used the processor to execute instructions.",
[]string{"session_name"},
nil,
)
c.PercentUserTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "user_time_seconds_total"),
"Total elapsed time that this process's threads have spent executing code in user mode. Applications, environment Names, and integral Names execute in user mode.",
[]string{"session_name"},
c.PercentCPUTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_time_seconds_total"),
"Total elapsed time that this process's threads have spent executing code.",
[]string{"mode", "session_name"},
nil,
)
c.PoolNonpagedBytes = prometheus.NewDesc(
@@ -202,71 +193,39 @@ func (c *collector) Build() error {
[]string{"session_name"},
nil,
)
var err error
c.hServer, err = wtsapi32.WTSOpenServer("")
if err != nil {
return fmt.Errorf("failed to open WTS server: %w", err)
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectTSSessionCount(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
if err := c.collectWTSSessions(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session infos", "err", err)
return err
}
if desc, err := c.collectTSSessionCounters(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
if err := c.collectTSSessionCounters(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session count metrics", "err", err)
return err
}
// only collect CollectionBrokerPerformance if host is a Connection Broker
if c.connectionBrokerEnabled {
if desc, err := c.collectCollectionBrokerPerformanceCounter(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting Connection Broker performance metrics", "desc", desc, "err", err)
if err := c.collectCollectionBrokerPerformanceCounter(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting Connection Broker performance metrics", "err", err)
return err
}
}
return nil
}
type perflibTerminalServices struct {
ActiveSessions float64 `perflib:"Active Sessions"`
InactiveSessions float64 `perflib:"Inactive Sessions"`
TotalSessions float64 `perflib:"Total Sessions"`
}
func (c *collector) collectTSSessionCount(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibTerminalServices, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Terminal Services"], &dst, c.logger)
if err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.LocalSessionCount,
prometheus.GaugeValue,
dst[0].ActiveSessions,
"active",
)
ch <- prometheus.MustNewConstMetric(
c.LocalSessionCount,
prometheus.GaugeValue,
dst[0].InactiveSessions,
"inactive",
)
ch <- prometheus.MustNewConstMetric(
c.LocalSessionCount,
prometheus.GaugeValue,
dst[0].TotalSessions,
"total",
)
return nil, nil
}
type perflibTerminalServicesSession struct {
Name string
HandleCount float64 `perflib:"Handle Count"`
@@ -286,11 +245,11 @@ type perflibTerminalServicesSession struct {
WorkingSetPeak float64 `perflib:"Working Set Peak"`
}
func (c *collector) collectTSSessionCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectTSSessionCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibTerminalServicesSession, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Terminal Services Session"], &dst, c.logger)
if err != nil {
return nil, err
return err
}
names := make(map[string]bool)
@@ -331,22 +290,25 @@ func (c *collector) collectTSSessionCounters(ctx *types.ScrapeContext, ch chan<-
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PercentPrivilegedTime,
c.PercentCPUTime,
prometheus.CounterValue,
d.PercentPrivilegedTime,
d.Name,
"privileged",
)
ch <- prometheus.MustNewConstMetric(
c.PercentProcessorTime,
c.PercentCPUTime,
prometheus.CounterValue,
d.PercentProcessorTime,
d.Name,
"processor",
)
ch <- prometheus.MustNewConstMetric(
c.PercentUserTime,
c.PercentCPUTime,
prometheus.CounterValue,
d.PercentUserTime,
d.Name,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.PoolNonpagedBytes,
@@ -397,7 +359,7 @@ func (c *collector) collectTSSessionCounters(ctx *types.ScrapeContext, ch chan<-
d.Name,
)
}
return nil, nil
return nil
}
type perflibRemoteDesktopConnectionBrokerCounterset struct {
@@ -406,14 +368,14 @@ type perflibRemoteDesktopConnectionBrokerCounterset struct {
FailedConnections float64 `perflib:"Failed Connections"`
}
func (c *collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibRemoteDesktopConnectionBrokerCounterset, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Remote Desktop Connection Broker Counterset"], &dst, c.logger)
if err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -437,5 +399,38 @@ func (c *collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeC
"Failed",
)
return nil, nil
return nil
}
func (c *collector) collectWTSSessions(ch chan<- prometheus.Metric) error {
sessions, err := wtsapi32.WTSEnumerateSessionsEx(c.hServer, c.logger)
if err != nil {
return fmt.Errorf("failed to enumerate WTS sessions: %w", err)
}
for _, session := range sessions {
userName := session.UserName
if session.DomainName != "" {
userName = fmt.Sprintf("%s\\%s", session.DomainName, session.UserName)
}
for stateID, stateName := range wtsapi32.WTSSessionStates {
isState := 0.0
if session.State == stateID {
isState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.SessionInfo,
prometheus.GaugeValue,
isState,
strings.Replace(session.SessionName, "#", " ", -1),
userName,
session.HostName,
stateName,
)
}
}
return nil
}

View File

@@ -8,5 +8,7 @@ import (
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, terminal_services.Name, terminal_services.NewWithFlags)
}

View File

@@ -16,6 +16,7 @@
package textfile
import (
"errors"
"fmt"
"io"
"os"
@@ -100,7 +101,7 @@ func (c *collector) Build() error {
c.directories = strings.Trim(*c.textFileDirectories, ",")
}
_ = level.Info(c.logger).Log("msg", fmt.Sprintf("textfile collector directories: %s", c.directories))
_ = level.Info(c.logger).Log("msg", "textfile collector directories: "+c.directories)
c.MtimeDesc = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, "textfile", "mtime_seconds"),
@@ -296,12 +297,12 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
for _, directory := range strings.Split(c.directories, ",") {
err := filepath.WalkDir(directory, func(path string, dirEntry os.DirEntry, err error) error {
if err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Error reading directory: %s", path), "err", err)
_ = level.Error(c.logger).Log("msg", "Error reading directory: "+path, "err", err)
errorMetric = 1.0
return nil
}
if !dirEntry.IsDir() && strings.HasSuffix(dirEntry.Name(), ".prom") {
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("Processing file: %s", path))
_ = level.Debug(c.logger).Log("msg", "Processing file: "+path)
families_array, err := scrapeFile(path, c.logger)
if err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Error scraping file: %q. Skip File.", path), "err", err)
@@ -325,7 +326,7 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
return nil
})
if err != nil && directory != "" {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Error reading textfile collector directory: %s", c.directories), "err", err)
_ = level.Error(c.logger).Log("msg", "Error reading textfile collector directory: "+c.directories, "err", err)
errorMetric = 1.0
}
}
@@ -373,24 +374,24 @@ func scrapeFile(path string, log log.Logger) ([]*dto.MetricFamily, error) {
}
// Use temporary array to check for duplicates
var families_array []*dto.MetricFamily
families_array := make([]*dto.MetricFamily, 0, len(parsedFamilies))
for _, mf := range parsedFamilies {
families_array = append(families_array, mf)
for _, m := range mf.Metric {
if m.TimestampMs != nil {
return nil, fmt.Errorf("textfile contains unsupported client-side timestamps")
return nil, errors.New("textfile contains unsupported client-side timestamps")
}
}
if mf.Help == nil {
help := fmt.Sprintf("Metric read from %s", path)
help := "Metric read from " + path
mf.Help = &help
}
}
// If duplicate metrics are detected in a *single* file, skip processing of file metrics
if duplicateMetricEntry(families_array) {
return nil, fmt.Errorf("duplicate metrics detected")
return nil, errors.New("duplicate metrics detected")
}
return families_array, nil
}
@@ -400,7 +401,7 @@ func checkBOM(encoding utfbom.Encoding) error {
return nil
}
return fmt.Errorf(encoding.String())
return errors.New(encoding.String())
}
func getDefaultPath() string {

View File

@@ -81,8 +81,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting thermalzone metrics", "desc", desc, "err", err)
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting thermalzone metrics", "err", err)
return err
}
return nil
@@ -98,16 +98,16 @@ type Win32_PerfRawData_Counters_ThermalZoneInformation struct {
ThrottleReasons uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_Counters_ThermalZoneInformation
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
// ThermalZone collector has been known to 'successfully' return an empty result.
if len(dst) == 0 {
return nil, errors.New("Empty results set for collector")
return errors.New("Empty results set for collector")
}
for _, info := range dst {
@@ -134,5 +134,5 @@ func (c *collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, erro
)
}
return nil, nil
return nil
}

View File

@@ -101,8 +101,8 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting time metrics", "desc", desc, "err", err)
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting time metrics", "err", err)
return err
}
return nil
@@ -118,10 +118,10 @@ type windowsTime struct {
NTPServerOutgoingResponsesTotal float64 `perflib:"NTP Server Outgoing Responses"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []windowsTime // Single-instance class, array is required but will have single entry.
if err := perflib.UnmarshalObject(ctx.PerfObjects["Windows Time Service"], &dst, c.logger); err != nil {
return nil, err
return err
}
ch <- prometheus.MustNewConstMetric(
@@ -154,5 +154,5 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
prometheus.CounterValue,
dst[0].NTPServerOutgoingResponsesTotal,
)
return nil, nil
return nil
}

View File

@@ -190,12 +190,12 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectMem(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware memory metrics", "desc", desc, "err", err)
if err := c.collectMem(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware memory metrics", "err", err)
return err
}
if desc, err := c.collectCpu(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware cpu metrics", "desc", desc, "err", err)
if err := c.collectCpu(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware cpu metrics", "err", err)
return err
}
return nil
@@ -226,14 +226,14 @@ type Win32_PerfRawData_vmGuestLib_VCPU struct {
HostProcessorSpeedMHz uint64
}
func (c *collector) collectMem(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectMem(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_vmGuestLib_VMem
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -308,21 +308,21 @@ func (c *collector) collectMem(ch chan<- prometheus.Metric) (*prometheus.Desc, e
mbToBytes(dst[0].MemUsedMB),
)
return nil, nil
return nil
}
func mbToBytes(mb uint64) float64 {
return float64(mb * 1024 * 1024)
}
func (c *collector) collectCpu(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectCpu(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_vmGuestLib_VCPU
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
return errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
@@ -367,5 +367,5 @@ func (c *collector) collectCpu(ch chan<- prometheus.Metric) (*prometheus.Desc, e
float64(dst[0].HostProcessorSpeedMHz),
)
return nil, nil
return nil
}

View File

@@ -570,52 +570,52 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectAudio(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast audio metrics", "desc", desc, "err", err)
if err := c.collectAudio(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast audio metrics", "err", err)
return err
}
if desc, err := c.collectCdr(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast CDR metrics", "desc", desc, "err", err)
if err := c.collectCdr(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast CDR metrics", "err", err)
return err
}
if desc, err := c.collectClipboard(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast clipboard metrics", "desc", desc, "err", err)
if err := c.collectClipboard(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast clipboard metrics", "err", err)
return err
}
if desc, err := c.collectHtml5Mmr(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast HTML5 MMR metrics", "desc", desc, "err", err)
if err := c.collectHtml5Mmr(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast HTML5 MMR metrics", "err", err)
return err
}
if desc, err := c.collectImaging(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast imaging metrics", "desc", desc, "err", err)
if err := c.collectImaging(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast imaging metrics", "err", err)
return err
}
if desc, err := c.collectRtav(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast RTAV metrics", "desc", desc, "err", err)
if err := c.collectRtav(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast RTAV metrics", "err", err)
return err
}
if desc, err := c.collectSerialPortandScanner(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast serial port and scanner metrics", "desc", desc, "err", err)
if err := c.collectSerialPortandScanner(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast serial port and scanner metrics", "err", err)
return err
}
if desc, err := c.collectSession(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast metrics", "desc", desc, "err", err)
if err := c.collectSession(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast metrics", "err", err)
return err
}
if desc, err := c.collectSkypeforBusinessControl(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast skype for business control metrics", "desc", desc, "err", err)
if err := c.collectSkypeforBusinessControl(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast skype for business control metrics", "err", err)
return err
}
if desc, err := c.collectThinPrint(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast thin print metrics", "desc", desc, "err", err)
if err := c.collectThinPrint(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast thin print metrics", "err", err)
return err
}
if desc, err := c.collectUsb(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast USB metrics", "desc", desc, "err", err)
if err := c.collectUsb(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast USB metrics", "err", err)
return err
}
if desc, err := c.collectWindowsMediaMmr(ch); err != nil {
_ = level.Error(c.logger).Log("failed collecting vmware blast windows media MMR metrics", "desc", desc, "err", err)
if err := c.collectWindowsMediaMmr(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast windows media MMR metrics", "err", err)
return err
}
return nil
@@ -726,16 +726,16 @@ type win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters struct {
TransmittedPackets uint32
}
func (c *collector) collectAudio(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectAudio(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastAudioCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -762,19 +762,19 @@ func (c *collector) collectAudio(ch chan<- prometheus.Metric) (*prometheus.Desc,
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectCdr(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectCdr(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastCDRCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -801,19 +801,19 @@ func (c *collector) collectCdr(ch chan<- prometheus.Metric) (*prometheus.Desc, e
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectClipboard(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectClipboard(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastClipboardCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -840,19 +840,19 @@ func (c *collector) collectClipboard(ch chan<- prometheus.Metric) (*prometheus.D
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectHtml5Mmr(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectHtml5Mmr(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastHTML5MMRcounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -879,19 +879,19 @@ func (c *collector) collectHtml5Mmr(ch chan<- prometheus.Metric) (*prometheus.De
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectImaging(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectImaging(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastImagingCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -966,19 +966,19 @@ func (c *collector) collectImaging(ch chan<- prometheus.Metric) (*prometheus.Des
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectRtav(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectRtav(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastRTAVCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -1005,19 +1005,19 @@ func (c *collector) collectRtav(ch chan<- prometheus.Metric) (*prometheus.Desc,
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectSerialPortandScanner(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectSerialPortandScanner(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastSerialPortandScannerCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -1044,19 +1044,19 @@ func (c *collector) collectSerialPortandScanner(ch chan<- prometheus.Metric) (*p
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectSession(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectSession(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastSessionCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -1161,19 +1161,19 @@ func (c *collector) collectSession(ch chan<- prometheus.Metric) (*prometheus.Des
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectSkypeforBusinessControl(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectSkypeforBusinessControl(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastSkypeforBusinessControlCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -1200,19 +1200,19 @@ func (c *collector) collectSkypeforBusinessControl(ch chan<- prometheus.Metric)
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectThinPrint(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectThinPrint(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastThinPrintCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -1239,19 +1239,19 @@ func (c *collector) collectThinPrint(ch chan<- prometheus.Metric) (*prometheus.D
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectUsb(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectUsb(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastUSBCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -1278,19 +1278,19 @@ func (c *collector) collectUsb(ch chan<- prometheus.Metric) (*prometheus.Desc, e
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}
func (c *collector) collectWindowsMediaMmr(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
func (c *collector) collectWindowsMediaMmr(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
return err
}
if len(dst) == 0 {
// It's possible for these classes to legitimately return null when queried
return nil, nil
return nil
}
ch <- prometheus.MustNewConstMetric(
@@ -1317,5 +1317,5 @@ func (c *collector) collectWindowsMediaMmr(ch chan<- prometheus.Metric) (*promet
float64(dst[0].TransmittedPackets),
)
return nil, nil
return nil
}

View File

@@ -1,6 +1,9 @@
package config
import "fmt"
import (
"fmt"
"strconv"
)
// flatten flattens the nested struct.
//
@@ -46,7 +49,7 @@ func flattenSlice(data []interface{}) map[string]string {
ret[fmt.Sprintf("%d,%s", idx, fk)] = fv
}
default:
ret[fmt.Sprint(idx)] = fmt.Sprint(typed)
ret[strconv.Itoa(idx)] = fmt.Sprint(typed)
}
}
return ret
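For plain slice indices strconv.Itoa produces the same key that fmt.Sprint did, without routing an int through the reflection-based formatter. A standalone illustration, not taken from the repository:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	values := []interface{}{"a", "b", "c"}
	for idx := range values {
		// Both expressions yield "0", "1", "2"; Itoa is the direct conversion.
		fmt.Println(strconv.Itoa(idx) == fmt.Sprint(idx)) // true
	}
}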

View File

@@ -0,0 +1,52 @@
package kernel32
import (
"syscall"
"unsafe"
)
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetDynamicTimeZoneInformationSys = kernel32.NewProc("GetDynamicTimeZoneInformation")
)
// SYSTEMTIME contains a date and time.
// 📑 https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-systemtime
type SYSTEMTIME struct {
WYear uint16
WMonth uint16
WDayOfWeek uint16
WDay uint16
WHour uint16
WMinute uint16
WSecond uint16
WMilliseconds uint16
}
// DynamicTimezoneInformation contains the current dynamic daylight time settings.
// 📑 https://docs.microsoft.com/en-us/windows/win32/api/timezoneapi/ns-timezoneapi-dynamic_time_zone_information
type DynamicTimezoneInformation struct {
Bias int32
standardName [32]uint16
StandardDate SYSTEMTIME
StandardBias int32
DaylightName [32]uint16
DaylightDate SYSTEMTIME
DaylightBias int32
TimeZoneKeyName [128]uint16
DynamicDaylightTimeDisabled uint8 // BOOLEAN
}
// GetDynamicTimeZoneInformation retrieves the current dynamic daylight time settings.
// 📑 https://docs.microsoft.com/en-us/windows/win32/api/timezoneapi/nf-timezoneapi-getdynamictimezoneinformation
func GetDynamicTimeZoneInformation() (DynamicTimezoneInformation, error) {
var tzi DynamicTimezoneInformation
r0, _, err := syscall.SyscallN(procGetDynamicTimeZoneInformationSys.Addr(), uintptr(unsafe.Pointer(&tzi)))
if uint32(r0) == 0xffffffff {
return tzi, err
}
return tzi, nil
}
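A short usage sketch for the new wrapper; the import path is assumed by analogy with pkg/headers/slc/slc.go and is not confirmed by this compare:

package main

import (
	"fmt"
	"syscall"

	"github.com/prometheus-community/windows_exporter/pkg/headers/kernel32" // assumed path
)

func main() {
	tzi, err := kernel32.GetDynamicTimeZoneInformation()
	if err != nil {
		panic(err)
	}
	// TimeZoneKeyName is a fixed-size UTF-16 buffer; UTF16ToString stops at the NUL.
	fmt.Println("time zone key:", syscall.UTF16ToString(tzi.TimeZoneKeyName[:]))
	fmt.Println("bias (minutes):", tzi.Bias)
}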

pkg/headers/slc/slc.go
View File

@@ -0,0 +1,40 @@
package slc
import (
"errors"
"unsafe"
"golang.org/x/sys/windows"
)
var (
slc = windows.NewLazySystemDLL("slc.dll")
procSLIsWindowsGenuineLocal = slc.NewProc("SLIsWindowsGenuineLocal")
)
// Define SL_GENUINE_STATE enumeration
// https://learn.microsoft.com/en-us/windows/win32/api/slpublic/ne-slpublic-sl_genuine_state
type SL_GENUINE_STATE uint32
const (
SL_GEN_STATE_IS_GENUINE SL_GENUINE_STATE = iota
SL_GEN_STATE_INVALID_LICENSE
SL_GEN_STATE_TAMPERED
SL_GEN_STATE_OFFLINE
SL_GEN_STATE_LAST
)
// SLIsWindowsGenuineLocal function wrapper
func SLIsWindowsGenuineLocal() (SL_GENUINE_STATE, error) {
var genuineState SL_GENUINE_STATE
_, _, err := procSLIsWindowsGenuineLocal.Call(
uintptr(unsafe.Pointer(&genuineState)),
)
if !errors.Is(err, windows.NTE_OP_OK) {
return 0, err
}
return genuineState, nil
}
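A hedged usage sketch for the license check, assuming the module path github.com/prometheus-community/windows_exporter:

package main

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/pkg/headers/slc" // assumed path
)

func main() {
	state, err := slc.SLIsWindowsGenuineLocal()
	if err != nil {
		panic(err)
	}
	// A license collector can expose this as an enum-style metric,
	// one series per state with the active one set to 1.
	fmt.Println("genuine:", state == slc.SL_GEN_STATE_IS_GENUINE)
}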

View File

@@ -0,0 +1,198 @@
package wtsapi32
import (
"fmt"
"syscall"
"unsafe"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"golang.org/x/sys/windows"
)
type WTSTypeClass int
// The valid values for the WTSTypeClass enumeration
const (
WTSTypeProcessInfoLevel0 WTSTypeClass = iota
WTSTypeProcessInfoLevel1
WTSTypeSessionInfoLevel1
)
type WTSConnectState uint32
const (
// wtsActive A user is logged on to the WinStation. This state occurs when a user is signed in and actively connected to the device.
wtsActive WTSConnectState = iota
// wtsConnected The WinStation is connected to the client.
wtsConnected
// wtsConnectQuery The WinStation is in the process of connecting to the client.
wtsConnectQuery
// wtsShadow The WinStation is shadowing another WinStation.
wtsShadow
// wtsDisconnected The WinStation is active but the client is disconnected.
// This state occurs when a user is signed in but not actively connected to the device, such as when the user has chosen to exit to the lock screen.
wtsDisconnected
// wtsIdle The WinStation is waiting for a client to connect.
wtsIdle
// wtsListen The WinStation is listening for a connection. A listener session waits for requests for new client connections.
// No user is logged on to a listener session. A listener session cannot be reset, shadowed, or changed to a regular client session.
wtsListen
// wtsReset The WinStation is being reset.
wtsReset
// wtsDown The WinStation is down due to an error.
wtsDown
// wtsInit The WinStation is initializing.
wtsInit
)
// wtsSessionInfo1 contains information about a session on a Remote Desktop Session Host (RD Session Host) server.
// docs: https://docs.microsoft.com/en-us/windows/win32/api/wtsapi32/ns-wtsapi32-wts_session_info_1w
type wtsSessionInfo1 struct {
// ExecEnvID An identifier that uniquely identifies the session within the list of sessions returned by the WTSEnumerateSessionsEx function.
ExecEnvID uint32
// State A value of the WTSConnectState enumeration type that specifies the connection state of a Remote Desktop Services session.
State uint32
// SessionID A session identifier assigned by the RD Session Host server, RD Virtualization Host server, or virtual machine.
SessionID uint32
// pSessionName A pointer to a null-terminated string that contains the name of this session. For example, "services", "console", or "RDP-Tcp#0".
pSessionName *uint16
// pHostName A pointer to a null-terminated string that contains the name of the computer that the session is running on.
// If the session is running directly on an RD Session Host server or RD Virtualization Host server, the string contains NULL.
// If the session is running on a virtual machine, the string contains the name of the virtual machine.
pHostName *uint16
// pUserName A pointer to a null-terminated string that contains the name of the user who is logged on to the session.
// If no user is logged on to the session, the string contains NULL.
pUserName *uint16
// pDomainName A pointer to a null-terminated string that contains the domain name of the user who is logged on to the session.
// If no user is logged on to the session, the string contains NULL.
pDomainName *uint16
// pFarmName A pointer to a null-terminated string that contains the name of the farm that the virtual machine is joined to.
// If the session is not running on a virtual machine that is joined to a farm, the string contains NULL.
pFarmName *uint16
}
type WTSSession struct {
ExecEnvID uint32
State WTSConnectState
SessionID uint32
SessionName string
HostName string
UserName string
DomainName string
FarmName string
}
var (
wtsapi32 = windows.NewLazySystemDLL("wtsapi32.dll")
procWTSOpenServerEx = wtsapi32.NewProc("WTSOpenServerExW")
procWTSEnumerateSessionsEx = wtsapi32.NewProc("WTSEnumerateSessionsExW")
procWTSFreeMemoryEx = wtsapi32.NewProc("WTSFreeMemoryExW")
procWTSCloseServer = wtsapi32.NewProc("WTSCloseServer")
WTSSessionStates = map[WTSConnectState]string{
wtsActive: "active",
wtsConnected: "connected",
wtsConnectQuery: "connect_query",
wtsShadow: "shadow",
wtsDisconnected: "disconnected",
wtsIdle: "idle",
wtsListen: "listen",
wtsReset: "reset",
wtsDown: "down",
wtsInit: "init",
}
)
func WTSOpenServer(server string) (syscall.Handle, error) {
var (
err error
serverName *uint16
)
if server != "" {
serverName, err = syscall.UTF16PtrFromString(server)
if err != nil {
return syscall.InvalidHandle, err
}
}
r1, _, err := procWTSOpenServerEx.Call(uintptr(unsafe.Pointer(serverName)))
serverHandle := syscall.Handle(r1)
if serverHandle == syscall.InvalidHandle {
return syscall.InvalidHandle, err
}
return serverHandle, nil
}
func WTSCloseServer(server syscall.Handle) error {
_, _, err := procWTSCloseServer.Call(uintptr(server))
if err != nil {
return fmt.Errorf("failed to close server: %w", err)
}
return err
}
func WTSFreeMemoryEx(class WTSTypeClass, pMemory uintptr, NumberOfEntries uint32) error {
_, _, err := procWTSFreeMemoryEx.Call(
uintptr(class),
pMemory,
uintptr(NumberOfEntries),
)
return err
}
func WTSEnumerateSessionsEx(server syscall.Handle, logger log.Logger) ([]WTSSession, error) {
var sessionInfoPointer uintptr
var count uint32
pLevel := uint32(1)
r1, _, err := procWTSEnumerateSessionsEx.Call(
uintptr(server),
uintptr(unsafe.Pointer(&pLevel)),
uintptr(0),
uintptr(unsafe.Pointer(&sessionInfoPointer)),
uintptr(unsafe.Pointer(&count)),
)
if r1 != 1 {
return nil, err
}
if sessionInfoPointer != 0 {
defer func(class WTSTypeClass, pMemory uintptr, NumberOfEntries uint32) {
err := WTSFreeMemoryEx(class, pMemory, NumberOfEntries)
if err != nil {
_ = level.Error(logger).Log("msg", "failed to free memory", "err", err)
}
}(WTSTypeSessionInfoLevel1, sessionInfoPointer, count)
}
var sizeTest wtsSessionInfo1
sessionSize := unsafe.Sizeof(sizeTest)
sessions := make([]WTSSession, 0, count)
for i := uint32(0); i < count; i++ {
curPtr := unsafe.Pointer(sessionInfoPointer + (uintptr(i) * sessionSize))
data := (*wtsSessionInfo1)(curPtr)
sessionInfo := WTSSession{
ExecEnvID: data.ExecEnvID,
State: WTSConnectState(data.State),
SessionID: data.SessionID,
SessionName: windows.UTF16PtrToString(data.pSessionName),
HostName: windows.UTF16PtrToString(data.pHostName),
UserName: windows.UTF16PtrToString(data.pUserName),
DomainName: windows.UTF16PtrToString(data.pDomainName),
FarmName: windows.UTF16PtrToString(data.pFarmName),
}
sessions = append(sessions, sessionInfo)
}
return sessions, nil
}
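A usage sketch that enumerates the local sessions through the new wrappers; the import paths are assumptions, everything else follows the signatures above:

package main

import (
	"fmt"

	"github.com/go-kit/log"

	"github.com/prometheus-community/windows_exporter/pkg/headers/wtsapi32" // assumed path
)

func main() {
	// An empty server name opens a handle to the local machine.
	server, err := wtsapi32.WTSOpenServer("")
	if err != nil {
		panic(err)
	}
	defer func() { _ = wtsapi32.WTSCloseServer(server) }()

	sessions, err := wtsapi32.WTSEnumerateSessionsEx(server, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	for _, s := range sessions {
		fmt.Printf("%d %s\\%s state=%s\n",
			s.SessionID, s.DomainName, s.UserName, wtsapi32.WTSSessionStates[s.State])
	}
}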

View File

@@ -1,6 +1,7 @@
package perflib
import (
"errors"
"fmt"
"reflect"
"strings"
@@ -17,7 +18,7 @@ const (
func UnmarshalObject(obj *PerfObject, vs interface{}, logger log.Logger) error {
if obj == nil {
return fmt.Errorf("counter not found")
return errors.New("counter not found")
}
rv := reflect.ValueOf(vs)
if rv.Kind() != reflect.Ptr || rv.IsNil() {

View File

@@ -2,7 +2,7 @@ package wmi
import (
"bytes"
"fmt"
"reflect"
"github.com/go-kit/log"
@@ -47,7 +47,7 @@ func QueryAll(src interface{}, logger log.Logger) string {
b.WriteString("SELECT * FROM ")
b.WriteString(className(src))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
_ = level.Debug(logger).Log("msg", "Generated WMI query "+b.String())
return b.String()
}
@@ -56,7 +56,7 @@ func QueryAllForClass(_ interface{}, class string, logger log.Logger) string {
b.WriteString("SELECT * FROM ")
b.WriteString(class)
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
_ = level.Debug(logger).Log("msg", "Generated WMI query "+b.String())
return b.String()
}
@@ -70,7 +70,7 @@ func QueryAllWhere(src interface{}, where string, logger log.Logger) string {
b.WriteString(where)
}
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
_ = level.Debug(logger).Log("msg", "Generated WMI query "+b.String())
return b.String()
}
@@ -84,6 +84,6 @@ func QueryAllForClassWhere(_ interface{}, class string, where string, logger log
b.WriteString(where)
}
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
_ = level.Debug(logger).Log("msg", "Generated WMI query "+b.String())
return b.String()
}
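These helpers build the WQL text from the destination struct, so the debug lines above are the only place the generated query surfaces; switching them to plain concatenation drops one fmt.Sprintf per query. A sketch of the intended use, assuming className derives the class from the struct's type name and that the package lives under pkg/wmi:

package main

import (
	"fmt"

	"github.com/go-kit/log"

	"github.com/prometheus-community/windows_exporter/pkg/wmi" // assumed path
)

// Win32_OperatingSystem mirrors the WMI class of the same name; only the
// type name matters for query generation.
type Win32_OperatingSystem struct {
	Caption string
}

func main() {
	var dst []Win32_OperatingSystem
	q := wmi.QueryAll(&dst, log.NewNopLogger())
	fmt.Println(q) // expected: SELECT * FROM Win32_OperatingSystem
}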

View File

@@ -12,13 +12,13 @@ type fakeWmiClass struct {
}
var (
mapQueryAll = func(src interface{}, class string, where string) string {
mapQueryAll = func(src interface{}, _ string, _ string) string {
return QueryAll(src, log.NewNopLogger())
}
mapQueryAllWhere = func(src interface{}, class string, where string) string {
mapQueryAllWhere = func(src interface{}, _ string, where string) string {
return QueryAllWhere(src, where, log.NewNopLogger())
}
mapQueryAllForClass = func(src interface{}, class string, where string) string {
mapQueryAllForClass = func(src interface{}, class string, _ string) string {
return QueryAllForClass(src, class, log.NewNopLogger())
}
mapQueryAllForClassWhere = func(src interface{}, class string, where string) string {

View File

@@ -63,6 +63,8 @@ windows_exporter_collector_timeout{collector="textfile"} 0
# TYPE windows_logical_disk_free_bytes gauge
# HELP windows_logical_disk_idle_seconds_total Seconds that the disk was idle (LogicalDisk.PercentIdleTime)
# TYPE windows_logical_disk_idle_seconds_total counter
# HELP windows_logical_disk_info A metric with a constant '1' value labeled with logical disk information
# TYPE windows_logical_disk_info gauge
# HELP windows_logical_disk_read_bytes_total The number of bytes transferred from the disk during read operations (LogicalDisk.DiskReadBytesPerSec)
# TYPE windows_logical_disk_read_bytes_total counter
# HELP windows_logical_disk_read_latency_seconds_total Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead)