Compare commits

...

44 Commits

Author SHA1 Message Date
Jan-Otto Kröpke
d451acbd63 [0.30] fix: Avoid COINIT_MULTITHREADED in CoInitializeEx (#2066) (#2091) 2025-06-21 11:29:06 +02:00
Jan-Otto Kröpke
7c14a79ef2 [0.30] service: report invalid parameter errors as debug (#2051) (#2092) 2025-06-21 11:28:48 +02:00
Jan-Otto Kröpke
3d7b16d61d [0.30] fix: added count checks (#2083) (#2089) 2025-06-21 11:28:30 +02:00
Jan-Otto Kröpke
a3131dc087 [0.30] logical_disk: skip unmounted volumes (#2084) (#2090)
Co-authored-by: Nic Jansma <nic@nicj.net>
2025-06-21 11:28:16 +02:00
Karl Persson
93940569fa update: export properties so that they can be read from yaml file (#2054) 2025-05-22 16:33:56 +02:00
Jan-Otto Kröpke
1e24d7b2c9 dns: add enhanced metrics (#1999) (#2040)
Co-authored-by: Matthew Wimpelberg <120263653+mwimpelberg28@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-17 14:15:07 +02:00
Jan-Otto Kröpke
109f537c14 terminal_services: Expose disconnected sessions again (#2026) (#2039) 2025-05-17 14:14:54 +02:00
Jan-Otto Kröpke
62b796e6f6 exchange: fix "The specified counter could not be found" (#1994) (#2038) 2025-05-17 14:12:43 +02:00
Jan-Otto Kröpke
8bae1abe20 fix: Support running as Windows Service within containers [0.30.x] (#2009) 2025-04-24 10:57:58 +02:00
Jan-Otto Kröpke
db60c78f32 Release 0.30.6 (#1977) 2025-04-06 12:27:19 +02:00
Jan-Otto Kröpke
bdd7725f17 chore: CI fixes
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-04-06 12:21:00 +02:00
Jan-Otto Kröpke
9ed3769765 chore: CI fixes
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-04-06 12:08:43 +02:00
Jan-Otto Kröpke
aa7157e27c system: Metric windows_system_boot_time_timestamp returns a UNIX timestamp again. (#1967)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
(cherry picked from commit ba605cffcc)
2025-04-06 11:57:42 +02:00
Jan-Otto Kröpke
13d5e1cd12 chore: CI fixes
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-04-06 11:55:38 +02:00
Jan-Otto Kröpke
2c4698f119 [0.30] support web.listen-addr from CLI (#3)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
2025-04-06 03:36:58 +02:00
Jan-Otto Kröpke
759faee1c3 mssql: support non-default instance names (#1958)
(cherry picked from commit fa8af098c8)
2025-04-06 03:33:45 +02:00
Jan-Otto Kröpke
50808c73fe logon: deprecate collector. Use terminal_services instead (#1957)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
(cherry picked from commit 0846c2805f)
2025-04-06 03:33:45 +02:00
Jan-Otto Kröpke
fe17f5f597 memory: fix panics if metrics do not exist (#1960)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>

(cherry picked from commit ecc805f0fa)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-04-06 03:33:45 +02:00
Jan-Otto Kröpke
b62c724977 service: fix windows.EnumServicesStatusEx reports buffer too small (#1954)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
(cherry picked from commit 63efa92be7)
2025-04-06 03:33:45 +02:00
Jan-Otto Kröpke
7252d403ae fix: return Windows 11 as product name, if build number is >= 22000 (#1935)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
(cherry picked from commit 041c2cd170)
2025-04-06 03:33:45 +02:00
Jan-Otto Kröpke
3180315cff hyperv: fix Windows Server 2016 compatibility (#1925)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
(cherry picked from commit bc1b40c679)
2025-04-06 03:33:45 +02:00
Jan-Otto Kröpke
9da6e56fcf fix: buffer length panic (#1936)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
(cherry picked from commit eecc6ce574)
2025-04-06 03:33:42 +02:00
Jan-Otto Kröpke
c300935170 fix: windows_cpu_processor_utility_total is always 0 (#1966)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>

(cherry picked from commit 9db4318ea9)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-04-05 22:23:53 +02:00
Jan-Otto Kröpke
6f0209ddb7 time: windows_time_clock_frequency_adjustment_ppb_total -> windows_time_clock_frequency_adjustment_ppb and add windows_time_clock_frequency_adjustment metric for Win2016 (#1910)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
(cherry picked from commit d6196c5c6b)
2025-04-04 23:18:14 +02:00
Jan-Otto Kröpke
a56e1ac71a fix: Support running as Windows Service within containers (#1907)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
(cherry picked from commit 3f2633d0b0)
2025-03-15 10:11:42 +01:00
Jan-Otto Kröpke
0c44a934f4 fix: update dependencies (#1920)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-03-11 10:46:14 +01:00
dependabot[bot]
d1151e91f3 chore(deps): bump github.com/prometheus/client_golang from 1.21.0 to 1.21.1 (#1919)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-10 15:52:45 +01:00
Jan-Otto Kröpke
cbe94c1ea5 netframework: fix metric names (re-add the collector sub-type to metrics) (#1908)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-03-01 17:22:59 +01:00
Jan-Otto Kröpke
b809f5a8ee docs: added examples for alternative installer dir (#1909)
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
2025-03-01 00:17:48 +01:00
dependabot[bot]
756d9c160d chore(deps): bump github.com/prometheus/client_golang from 1.21.0-rc.0 to 1.21.0 (#1899)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-01 00:09:41 +01:00
Jan-Otto Kröpke
a0e132b30e terminal_services: fix panic in collect (#1906) 2025-02-28 07:53:23 +01:00
Jan-Otto Kröpke
d645e89be9 ci: fix checksum (#1905) 2025-02-28 07:53:10 +01:00
Jan-Otto Kröpke
a73a08d704 fix: log to the Windows temp directory on service detection failures. (#1890) 2025-02-28 01:35:41 +01:00
Jan-Otto Kröpke
228164765b docs: fix physical_disk docs (#1897) 2025-02-22 08:29:52 +00:00
Jan-Otto Kröpke
4c9c78c599 time: fix panic if counters aren't present (#1898) 2025-02-22 09:29:02 +01:00
Jan-Otto Kröpke
4b3c154049 docs: add disk activity query. (#1889)
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
2025-02-18 13:41:26 +01:00
Jan-Otto Kröpke
be0037eda5 ci: pin wix toolset version to avoid installing incompatible extensions (#1885)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-02-13 20:56:44 +01:00
Jan-Otto Kröpke
367fae95c4 mscluster: restore support for Windows Server 2016-2019 (#1882)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-02-12 21:03:24 +01:00
Jan-Otto Kröpke
96ffc3bf3f config: multiple web.listen-address args result in an error if --config.file is defined. (#1876)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-02-12 20:30:12 +01:00
Jan-Otto Kröpke
285c4cc5ea feat: windows_exporter uses its own event log source to correctly format messages. (#1873)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
2025-02-10 18:57:31 +01:00
Jan-Otto Kröpke
f07aceb0dd cs: fix metric description (#1881)
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-02-10 18:23:35 +01:00
Jan-Otto Kröpke
dcacce4577 fix: sign binaries (#1878)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-02-09 22:34:55 +01:00
Jan-Otto Kröpke
fc5b3051fa feat: sign binaries (#1875)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-02-08 20:04:37 +01:00
Jan-Otto Kröpke
1b2958a7cc fix: slow stop if run as service (#1870)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-02-07 22:26:09 +01:00
112 changed files with 1877 additions and 1291 deletions

View File

@@ -13,4 +13,4 @@ indent_size = 4
[*.{yml,yaml}]
indent_style = space
indent_size = 2
indent_size = 2

View File

@@ -1,3 +1,5 @@
<!--
Please give your PR a title in the form "area: short description". For example "cpu: reduce usage by 95%" or "docs: fix typo in installation.md".

View File

@@ -4,27 +4,13 @@ name: Linting
# have been changed.
on:
push:
paths:
- "go.mod"
- "go.sum"
- "**.go"
- ".github/workflows/lint.yml"
- "tools/e2e-output.txt"
branches:
- master
- next
- main
- "0.*"
- "1.*"
pull_request:
paths:
- "go.mod"
- "go.sum"
- "**.go"
- ".github/workflows/lint.yml"
- "tools/e2e-output.txt"
branches:
- master
- next
- main
env:
VERSION_PROMU: '0.14.0'
@@ -105,4 +91,4 @@ jobs:
uses: golangci/golangci-lint-action@v6
with:
version: v1.60
args: "--max-same-issues=0"
args: "--max-same-issues=0"

View File

@@ -37,7 +37,7 @@ jobs:
- name: check
run: |
PR_TITLE_PREFIX=$(echo "$PR_TITLE" | cut -d':' -f1)
if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then
if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "fix(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Release"* ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then
exit 0
fi

View File

@@ -22,6 +22,7 @@ env:
jobs:
build:
runs-on: windows-2022
environment: build
steps:
- uses: actions/checkout@v4
with:
@@ -32,13 +33,14 @@ jobs:
go-version-file: 'go.mod'
- name: Install WiX
run: dotnet tool install --global wix
run: |
dotnet tool install --global wix --version 5.0.2
- name: Install WiX extensions
run: |
wix extension add -g WixToolset.Util.wixext
wix extension add -g WixToolset.Ui.wixext
wix extension add -g WixToolset.Firewall.wixext
wix extension add -g WixToolset.Util.wixext/5.0.2
wix extension add -g WixToolset.Ui.wixext/5.0.2
wix extension add -g WixToolset.Firewall.wixext/5.0.2
- name: Install Build deps
run: |
@@ -68,6 +70,40 @@ jobs:
Get-ChildItem -Path output
- name: Sign build artifacts
if: ${{ (github.event_name != 'pull_request' && github.repository == 'prometheus-community/windows_exporter') || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'prometheus-community/windows_exporter') }}
run: |
$ErrorActionPreference = "Stop"
$Version = Get-Content VERSION
$b64 = $env:CODE_SIGN_KEY
$filename = 'windows_exporter_CodeSign.pfx'
$bytes = [Convert]::FromBase64String($b64)
[IO.File]::WriteAllBytes($filename, $bytes)
$basePath = "C:\Program Files (x86)\Windows Kits\10\bin"
$latestSigntool = Get-ChildItem -Path $basePath -Directory |
Where-Object { $_.Name -match "^\d+\.\d+\.\d+\.\d+$" } |
Sort-Object { [Version]$_.Name } -Descending |
Select-Object -First 1 |
ForEach-Object { Join-Path $_.FullName "x64\signtool.exe" }
if (Test-Path $latestSigntool) {
Write-Output $latestSigntool
} else {
Write-Output "signtool.exe not found"
}
foreach($Arch in "amd64", "arm64") {
& $latestSigntool sign /v /tr "http://timestamp.digicert.com" /d "Prometheus exporter for Windows machines" /td SHA256 /fd SHA256 /a /f "windows_exporter_CodeSign.pfx" /p $env:CODE_SIGN_PASSWORD "output\windows_exporter-$Version-$Arch.exe"
}
rm windows_exporter_CodeSign.pfx
env:
CODE_SIGN_KEY: ${{ secrets.CODE_SIGN_KEY }}
CODE_SIGN_PASSWORD: ${{ secrets.CODE_SIGN_PASSWORD }}
- name: Build Release Artifacts
run: |
$ErrorActionPreference = "Stop"
@@ -79,9 +115,46 @@ jobs:
}
Move-Item installer\*.msi output\
Get-ChildItem -Path output\
Get-ChildItem -Path output\
promu checksum output\
- name: Sign installer artifacts
if: ${{ (github.event_name != 'pull_request' && github.repository == 'prometheus-community/windows_exporter') || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'prometheus-community/windows_exporter') }}
run: |
$ErrorActionPreference = "Stop"
$Version = Get-Content VERSION
$b64 = $env:CODE_SIGN_KEY
$filename = 'windows_exporter_CodeSign.pfx'
$bytes = [Convert]::FromBase64String($b64)
[IO.File]::WriteAllBytes($filename, $bytes)
$basePath = "C:\Program Files (x86)\Windows Kits\10\bin"
$latestSigntool = Get-ChildItem -Path $basePath -Directory |
Where-Object { $_.Name -match "^\d+\.\d+\.\d+\.\d+$" } |
Sort-Object { [Version]$_.Name } -Descending |
Select-Object -First 1 |
ForEach-Object { Join-Path $_.FullName "x64\signtool.exe" }
if (Test-Path $latestSigntool) {
Write-Output $latestSigntool
} else {
Write-Output "signtool.exe not found"
}
foreach($Arch in "amd64", "arm64") {
& $latestSigntool sign /v /tr "http://timestamp.digicert.com" /d "Prometheus exporter for Windows machines" /td SHA256 /fd SHA256 /a /f "windows_exporter_CodeSign.pfx" /p $env:CODE_SIGN_PASSWORD "output\windows_exporter-$Version-$Arch.msi"
}
rm windows_exporter_CodeSign.pfx
env:
CODE_SIGN_KEY: ${{ secrets.CODE_SIGN_KEY }}
CODE_SIGN_PASSWORD: ${{ secrets.CODE_SIGN_PASSWORD }}
- name: Generate checksums
run: |
promu checksum output
cat output\sha256sums.txt
- name: Upload Artifacts
uses: actions/upload-artifact@v4
@@ -103,6 +176,9 @@ jobs:
runs-on: ubuntu-latest
needs:
- build
env:
DOCKER_BUILD_SUMMARY: false
DOCKER_BUILD_RECORD_UPLOAD: false
steps:
- uses: actions/checkout@v4
with:
@@ -165,3 +241,4 @@ jobs:
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: windows/amd64
annotations: ${{ steps.meta.outputs.labels }}
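The signing steps in this workflow run signtool over each release binary and MSI. As a hedged local check (the file name below is illustrative, not taken from the workflow), a downloaded artifact's signature can be inspected with PowerShell's built-in cmdlet:

```powershell
# Inspect the Authenticode signature of a downloaded release artifact.
# Replace the file name with the actual binary or MSI you downloaded.
Get-AuthenticodeSignature -FilePath .\windows_exporter-amd64.exe |
    Format-List Status, StatusMessage, SignerCertificate, TimeStamperCertificate
```

With a self-signed publisher certificate, `Status` will typically only read `Valid` once the certificate has been trusted, as described in the README section further below.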

View File

@@ -33,7 +33,6 @@ Name | Description | Enabled by default
[iis](docs/collector.iis.md) | IIS sites and applications |
[license](docs/collector.license.md) | Windows license status |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | &#10003;
[logon](docs/collector.logon.md) | User logon sessions |
[memory](docs/collector.memory.md) | Memory usage metrics | &#10003;
[mscluster](docs/collector.mscluster.md) | MSCluster metrics |
[msmq](docs/collector.msmq.md) | MSMQ queues |
@@ -100,6 +99,9 @@ windows_exporter accepts flags to configure certain behaviours. The ones configu
The latest release can be downloaded from the [releases page](https://github.com/prometheus-community/windows_exporter/releases).
All binaries and installation packages are signed with a self-signed certificate. The public key can be found [here](https://github.com/prometheus-community/windows_exporter/blob/master/installer/codesign.cer).
Once imported into the trusted root certificate store, the binaries and installation packages will be trusted.
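A minimal sketch of that import, assuming an elevated PowerShell session and that `codesign.cer` has been downloaded from the repository path linked above (the local file name is an assumption):

```powershell
# Import the publisher certificate (codesign.cer from the repository) into the
# local machine's Trusted Root store so the signed binaries and MSIs validate.
Import-Certificate -FilePath .\codesign.cer -CertStoreLocation Cert:\LocalMachine\Root
```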
Each release provides a .msi installer. The installer will set up windows_exporter as a Windows service, as well as create an exception in the Windows Firewall.
If the installer is run without any parameters, the exporter will run with default settings for enabled collectors, ports, etc.
@@ -125,6 +127,8 @@ The following parameters are available:
| `EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string. For `--collectors.enabled` and `--config.file`, use the specialized properties `ENABLED_COLLECTORS` and `CONFIG_FILE` |
| `ADDLOCAL` | Enables features within the windows_exporter installer. Supported values: `FirewallException` |
| `REMOVE` | Disables features within the windows_exporter installer. Supported values: `FirewallException` |
| `APPLICATIONFOLDER` | Directory to install windows_exporter. Defaults to `C:\Program Files\windows_exporter` |
Parameters are sent to the installer via `msiexec`.
In PowerShell, the `--%` stop-parsing token should be passed before defining properties.
@@ -145,6 +149,11 @@ Define a config file.
msiexec /i <path-to-msi-file> --% CONFIG_FILE="D:\config.yaml"
```
Alternative install directory
```powershell
msiexec /i <path-to-msi-file> --% ADDLOCAL=FirewallException APPLICATIONFOLDER="F:\Program Files\windows_exporter"
```
On some older versions of Windows,
you may need to surround parameter values with double quotes for the installation command to parse properly:
```powershell

View File

@@ -14,8 +14,11 @@
package main
import (
"errors"
"fmt"
"os"
"strings"
"unsafe"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"
@@ -33,6 +36,9 @@ var (
// stopCh is a channel to send a signal to the service manager that the service is stopping.
stopCh = make(chan struct{})
// serviceManagerFinishedCh is a channel to send a signal to the main function that the service manager has stopped the service.
serviceManagerFinishedCh = make(chan struct{}, 1)
)
// IsService variable declaration allows initiating time-sensitive components like registering the Windows service
@@ -49,33 +55,37 @@ var (
//
//nolint:gochecknoglobals
var IsService = func() bool {
defer func() {
go func() {
err := svc.Run(serviceName, &windowsExporterService{})
if err == nil {
return
}
_ = logToEventToLog(windows.EVENTLOG_ERROR_TYPE, fmt.Sprintf("failed to start service: %v", err))
}()
}()
var err error
isService, err := svc.IsWindowsService()
isService, err := isWindowsService()
if err != nil {
_ = logToEventToLog(windows.EVENTLOG_ERROR_TYPE, fmt.Sprintf("failed to detect service: %v", err))
logToFile(fmt.Sprintf("failed to detect service: %v", err))
exitCodeCh <- 1
return false
}
if !isService {
return false
}
defer func() {
go func() {
err := svc.Run(serviceName, &windowsExporterService{})
if err != nil {
// https://github.com/open-telemetry/opentelemetry-collector/pull/9042
if !errors.Is(err, windows.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT) {
if logErr := logToEventToLog(windows.EVENTLOG_ERROR_TYPE, fmt.Sprintf("failed to start service: %v", err)); logErr != nil {
logToFile(fmt.Sprintf("failed to start service: %v", err))
}
}
}
serviceManagerFinishedCh <- struct{}{}
}()
}()
if err := logToEventToLog(windows.EVENTLOG_INFORMATION_TYPE, "attempting to start exporter service"); err != nil {
//nolint:gosec
_ = os.WriteFile("C:\\Program Files\\windows_exporter\\start-service.error.log", []byte(fmt.Sprintf("failed sent log to event log: %v", err)), 0o644)
logToFile(fmt.Sprintf("failed to send log to event log: %v", err))
exitCodeCh <- 2
}
@@ -122,7 +132,7 @@ func (s *windowsExporterService) Execute(_ []string, r <-chan svc.ChangeRequest,
// logToEventToLog logs a message to the Windows event log.
func logToEventToLog(eType uint16, msg string) error {
eventLog, err := eventlog.Open("windows_exporter")
eventLog, err := eventlog.Open(serviceName)
if err != nil {
return fmt.Errorf("failed to open event log: %w", err)
}
@@ -130,18 +140,70 @@ func logToEventToLog(eType uint16, msg string) error {
_ = eventLog.Close()
}(eventLog)
p, err := windows.UTF16PtrFromString(msg)
if err != nil {
return fmt.Errorf("error convert string to UTF-16: %w", err)
switch eType {
case windows.EVENTLOG_ERROR_TYPE:
err = eventLog.Error(102, msg)
case windows.EVENTLOG_WARNING_TYPE:
err = eventLog.Warning(101, msg)
case windows.EVENTLOG_INFORMATION_TYPE:
err = eventLog.Info(100, msg)
}
zero := uint16(0)
ss := []*uint16{p, &zero, &zero, &zero, &zero, &zero, &zero, &zero, &zero}
err = windows.ReportEvent(eventLog.Handle, eType, 0, 3299, 0, 9, 0, &ss[0], nil)
if err != nil {
return fmt.Errorf("error report event: %w", err)
}
return nil
}
func logToFile(msg string) {
if file, err := os.CreateTemp("", "windows_exporter.service.error.log"); err == nil {
_, _ = file.WriteString(msg)
_ = file.Close()
}
}
// isWindowsService is a clone of "golang.org/x/sys/windows/svc:IsWindowsService", but with a fix
// for Windows containers.
// Go cloned the .NET implementation of this function, which has since
// been patched to support Windows containers, which don't use Session ID 0 for services.
// https://github.com/dotnet/runtime/pull/74188
// This function can be replaced with go's once go brings in the fix.
//
// Copyright 2023-present Datadog, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// https://github.com/DataDog/datadog-agent/blob/46740e82ef40a04c4be545ed8c16a4b0d1f046cf/pkg/util/winutil/servicemain/servicemain.go#L128
func isWindowsService() (bool, error) {
var currentProcess windows.PROCESS_BASIC_INFORMATION
infoSize := uint32(unsafe.Sizeof(currentProcess))
err := windows.NtQueryInformationProcess(windows.CurrentProcess(), windows.ProcessBasicInformation, unsafe.Pointer(&currentProcess), infoSize, &infoSize)
if err != nil {
return false, err
}
var parentProcess *windows.SYSTEM_PROCESS_INFORMATION
for infoSize = uint32((unsafe.Sizeof(*parentProcess) + unsafe.Sizeof(uintptr(0))) * 1024); ; {
parentProcess = (*windows.SYSTEM_PROCESS_INFORMATION)(unsafe.Pointer(&make([]byte, infoSize)[0]))
err = windows.NtQuerySystemInformation(windows.SystemProcessInformation, unsafe.Pointer(parentProcess), infoSize, &infoSize)
if err == nil {
break
} else if !errors.Is(err, windows.STATUS_INFO_LENGTH_MISMATCH) {
return false, err
}
}
for ; ; parentProcess = (*windows.SYSTEM_PROCESS_INFORMATION)(unsafe.Pointer(uintptr(unsafe.Pointer(parentProcess)) + uintptr(parentProcess.NextEntryOffset))) {
if parentProcess.UniqueProcessID == currentProcess.InheritedFromUniqueProcessId {
return strings.EqualFold("services.exe", parentProcess.ImageName.String()), nil
}
if parentProcess.NextEntryOffset == 0 {
break
}
}
return false, nil
}

View File

@@ -47,7 +47,11 @@ import (
)
func main() {
exitCode := run()
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
exitCode := run(ctx, os.Args[1:])
stop()
// If we are running as a service, we need to signal the service control manager that we are done.
if !IsService {
@@ -57,12 +61,11 @@ func main() {
exitCodeCh <- exitCode
// Wait for the service control manager to signal that we are done.
<-stopCh
<-serviceManagerFinishedCh
}
func run() int {
func run(ctx context.Context, args []string) int {
startTime := time.Now()
ctx := context.Background()
app := kingpin.New("windows_exporter", "A metrics collector for Windows.")
@@ -71,7 +74,7 @@ func run() int {
"config.file",
"YAML configuration file to use. Values set in this file will be overridden by CLI flags.",
).String()
insecureSkipVerify = app.Flag(
_ = app.Flag(
"config.file.insecure-skip-verify",
"Skip TLS verification in loading YAML configuration.",
).Default("false").Bool()
@@ -122,11 +125,10 @@ func run() int {
// Initialize collectors before loading and parsing CLI arguments
collectors := collector.NewWithFlags(app)
// Load values from configuration file(s). Executable flags must first be parsed, in order
// to load the specified file(s).
if _, err := app.Parse(os.Args[1:]); err != nil {
//nolint:contextcheck
if err := config.Parse(app, args); err != nil {
//nolint:sloglint // we do not have a logger yet
slog.Error("Failed to parse CLI args",
slog.LogAttrs(ctx, slog.LevelError, "Failed to load configuration",
slog.Any("err", err),
)
@@ -137,61 +139,21 @@ func run() int {
logger, err := log.New(logConfig)
if err != nil {
//nolint:sloglint // we do not have a logger yet
slog.Error("failed to create logger",
logger.LogAttrs(ctx, slog.LevelError, "failed to create logger",
slog.Any("err", err),
)
return 1
}
if *configFile != "" {
resolver, err := config.NewResolver(ctx, *configFile, logger, *insecureSkipVerify)
if err != nil {
logger.Error("could not load config file",
slog.Any("err", err),
)
return 1
}
if err = resolver.Bind(app, os.Args[1:]); err != nil {
logger.ErrorContext(ctx, "failed to bind configuration",
slog.Any("err", err),
)
return 1
}
// NOTE: This is temporary fix for issue #1092, calling kingpin.Parse
// twice makes slices flags duplicate its value, this clean up
// the first parse before the second call.
*webConfig.WebListenAddresses = (*webConfig.WebListenAddresses)[1:]
// Parse flags once more to include those discovered in configuration file(s).
if _, err = app.Parse(os.Args[1:]); err != nil {
logger.ErrorContext(ctx, "failed to parse CLI args from YAML file",
slog.Any("err", err),
)
return 1
}
logger, err = log.New(logConfig)
if err != nil {
//nolint:sloglint // we do not have a logger yet
slog.Error("failed to create logger",
slog.Any("err", err),
)
return 1
}
if configFile != nil && *configFile != "" {
logger.InfoContext(ctx, "using configuration file: "+*configFile)
}
logger.LogAttrs(ctx, slog.LevelDebug, "logging has Started")
if err = setPriorityWindows(logger, os.Getpid(), *processPriority); err != nil {
logger.Error("failed to set process priority",
if err = setPriorityWindows(ctx, logger, os.Getpid(), *processPriority); err != nil {
logger.LogAttrs(ctx, slog.LevelError, "failed to set process priority",
slog.Any("err", err),
)
@@ -200,7 +162,7 @@ func run() int {
enabledCollectorList := expandEnabledCollectors(*enabledCollectors)
if err := collectors.Enable(enabledCollectorList); err != nil {
logger.Error("couldn't enable collectors",
logger.LogAttrs(ctx, slog.LevelError, "couldn't enable collectors",
slog.Any("err", err),
)
@@ -208,9 +170,9 @@ func run() int {
}
// Initialize collectors before loading
if err = collectors.Build(logger); err != nil {
if err = collectors.Build(ctx, logger); err != nil {
for _, err := range utils.SplitError(err) {
logger.Error("couldn't initialize collector",
logger.LogAttrs(ctx, slog.LevelError, "couldn't initialize collector",
slog.Any("err", err),
)
@@ -265,17 +227,14 @@ func run() int {
close(errCh)
}()
ctx, stop := signal.NotifyContext(ctx, os.Interrupt, os.Kill)
defer stop()
select {
case <-ctx.Done():
logger.Info("Shutting down windows_exporter via kill signal")
logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via kill signal")
case <-stopCh:
logger.Info("Shutting down windows_exporter via service control")
logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via service control")
case err := <-errCh:
if err != nil {
logger.ErrorContext(ctx, "Failed to start windows_exporter",
logger.LogAttrs(ctx, slog.LevelError, "Failed to start windows_exporter",
slog.Any("err", err),
)
@@ -286,9 +245,9 @@ func run() int {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_ = server.Shutdown(ctx)
_ = server.Shutdown(ctx) //nolint:contextcheck // create a new context for server shutdown
logger.InfoContext(ctx, "windows_exporter has shut down")
logger.LogAttrs(ctx, slog.LevelInfo, "windows_exporter has shut down") //nolint:contextcheck
return 0
}
@@ -311,7 +270,7 @@ func logCurrentUser(logger *slog.Logger) {
}
// setPriorityWindows sets the priority of the current process to the specified value.
func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
func setPriorityWindows(ctx context.Context, logger *slog.Logger, pid int, priority string) error {
// Mapping of priority names to uint32 values required by windows.SetPriorityClass.
priorityStringToInt := map[string]uint32{
"realtime": windows.REALTIME_PRIORITY_CLASS,
@@ -329,7 +288,7 @@ func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
return nil
}
logger.LogAttrs(context.Background(), slog.LevelDebug, "setting process priority to "+priority)
logger.LogAttrs(ctx, slog.LevelDebug, "setting process priority to "+priority)
// https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
handle, err := windows.OpenProcess(

View File

@@ -0,0 +1,188 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package main
import (
"context"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"golang.org/x/sys/windows"
)
//nolint:tparallel
func TestRun(t *testing.T) {
t.Parallel()
for _, tc := range []struct {
name string
args []string
config string
metricsEndpoint string
exitCode int
}{
{
name: "default",
args: []string{},
metricsEndpoint: "http://127.0.0.1:9182/metrics",
},
{
name: "web.listen-address",
args: []string{"--web.listen-address=127.0.0.1:8080"},
metricsEndpoint: "http://127.0.0.1:8080/metrics",
},
{
name: "web.listen-address",
args: []string{"--web.listen-address=127.0.0.1:8081", "--web.listen-address=[::1]:8081"},
metricsEndpoint: "http://[::1]:8081/metrics",
},
{
name: "config",
args: []string{"--config.file=config.yaml"},
config: `{"web":{"listen-address":"127.0.0.1:8082"}}`,
metricsEndpoint: "http://127.0.0.1:8082/metrics",
},
{
name: "web.listen-address with config",
args: []string{"--config.file=config.yaml", "--web.listen-address=127.0.0.1:8084"},
config: `{"web":{"listen-address":"127.0.0.1:8083"}}`,
metricsEndpoint: "http://127.0.0.1:8084/metrics",
},
} {
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if tc.config != "" {
// Create a temporary config file.
tmpfile, err := os.CreateTemp(t.TempDir(), "config-*.yaml")
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, tmpfile.Close())
})
_, err = tmpfile.WriteString(tc.config)
require.NoError(t, err)
for i, arg := range tc.args {
tc.args[i] = strings.ReplaceAll(arg, "config.yaml", tmpfile.Name())
}
}
exitCodeCh := make(chan int)
var stdout string
go func() {
stdout = captureOutput(t, func() {
// Simulate the service control manager signaling that we are done.
exitCodeCh <- run(ctx, tc.args)
})
}()
t.Cleanup(func() {
select {
case exitCode := <-exitCodeCh:
require.Equal(t, tc.exitCode, exitCode)
case <-time.After(2 * time.Second):
t.Fatalf("timed out waiting for exit code, want %d", tc.exitCode)
}
})
if tc.exitCode != 0 {
return
}
uri, err := url.Parse(tc.metricsEndpoint)
require.NoError(t, err)
err = waitUntilListening(t, "tcp", uri.Host)
require.NoError(t, err, "LOGS:\n%s", stdout)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, tc.metricsEndpoint, nil)
require.NoError(t, err)
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err, "LOGS:\n%s", stdout)
require.Equal(t, http.StatusOK, resp.StatusCode)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
err = resp.Body.Close()
require.NoError(t, err)
require.NotEmpty(t, body)
require.Contains(t, string(body), "# HELP windows_exporter_build_info")
cancel()
})
}
}
func captureOutput(tb testing.TB, f func()) string {
tb.Helper()
orig := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
f()
os.Stdout = orig
_ = w.Close()
out, _ := io.ReadAll(r)
return string(out)
}
func waitUntilListening(tb testing.TB, network, address string) error {
tb.Helper()
var (
conn net.Conn
err error
)
for range 10 {
conn, err = net.DialTimeout(network, address, 100*time.Millisecond)
if err == nil {
_ = conn.Close()
return nil
}
if errors.Is(err, windows.Errno(10061)) {
time.Sleep(50 * time.Millisecond)
continue
}
}
return fmt.Errorf("listener not listening: %w", err)
}

View File

@@ -1,23 +1,2 @@
# example configuration file for windows_exporter
collectors:
enabled: cpu,cpu_info,exchange,iis,logical_disk,logon,memory,net,os,performancecounter,process,remote_fx,service,system,tcp,time,terminal_services,textfile
collector:
service:
include: "windows_exporter"
performancecounter:
objects: |-
- name: photon_udp
object: "Photon Socket Server: UDP"
instances: ["*"]
counters:
- name: "UDP: Datagrams in"
metric: "photon_udp_datagrams"
labels:
direction: "in"
- name: "UDP: Datagrams out"
metric: "photon_udp_datagrams"
labels:
direction: "out"
log:
level: warn
web:
listen-address: ":9183"

View File

@@ -3,14 +3,19 @@
The dns collector exposes metrics about the DNS server
|||
-|-
Metric name prefix | `dns`
Classes | [`Win32_PerfRawData_DNS_DNS`](https://technet.microsoft.com/en-us/library/cc977686.aspx)
Enabled by default? | No
-|-|-
Metric name prefix | `dns` |
Classes | [`Win32_PerfRawData_DNS_DNS`](https://technet.microsoft.com/en-us/library/cc977686.aspx) |
Enabled by default | Yes |
Metric name prefix (error stats) | `windows_dns` |
Classes | [`MicrosoftDNS_Statistic`](https://learn.microsoft.com/en-us/windows/win32/dns/dns-wmi-provider-overview) |
Enabled by default (error stats)? | Yes |
## Flags
None
Name | Description
-----|------------
`collector.dns.enabled` | Comma-separated list of collectors to use. Available collectors: `metrics`, `error_stats`. Defaults to all collectors if not specified.
## Metrics
@@ -38,12 +43,56 @@ Name | Description | Type | Labels
`windows_dns_wins_queries_total` | _Not yet documented_ | counter | `direction`
`windows_dns_wins_responses_total` | _Not yet documented_ | counter | `direction`
`windows_dns_unmatched_responses_total` | _Not yet documented_ | counter | None
`windows_dns_error_stats_total` | DNS error statistics from MicrosoftDNS_Statistic | counter | `name`, `collection_name`, `dns_server`
### Sub-collectors
The DNS collector is split into two sub-collectors:
1. `metrics` - Collects standard DNS performance metrics using PDH (Performance Data Helper)
2. `wmi_stats` - Collects DNS error statistics from the MicrosoftDNS_Statistic WMI class
By default, both sub-collectors are enabled. You can enable specific sub-collectors using the `collector.dns.enabled` flag.
### Example Usage
To enable only DNS error statistics collection:
```powershell
windows_exporter.exe --collector.dns.enabled=wmi_stats
```
To enable only standard DNS metrics:
```powershell
windows_exporter.exe --collector.dns.enabled=metrics
```
To enable both (default behavior):
```powershell
windows_exporter.exe --collector.dns.enabled=metrics,wmi_stats
```
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
```
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="BadKey"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="BadSig"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="BadTime"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="FormError"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="Max"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NoError"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NotAuth"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NotImpl"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NotZone"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NxDomain"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NxRRSet"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="Refused"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="ServFail"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="UnknownError"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="YxDomain"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="YxRRSet"} 0
```
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -43,7 +43,7 @@ Comma-separated list of collectors to use, for example: `--collectors.exchange.e
| `windows_exchange_transport_queues_messages_submitted_total` | Messages Submitted Total |
| `windows_exchange_transport_queues_messages_delayed_total` | Messages Delayed Total |
| `windows_exchange_transport_queues_messages_completed_delivery_total` | Messages Completed Delivery Total |
| `windows_exchange_transport_queues_shadow_queue_length` | Shadow Queue Length |
| `windows_exchange_transport_queues_aggregate_shadow_queue_length` | The current number of messages in shadow queues |
| `windows_exchange_transport_queues_submission_queue_length` | Submission Queue Length |
| `windows_exchange_transport_queues_delay_queue_length` | Delay Queue Length |
| `windows_exchange_transport_queues_items_completed_delivery_total` | Items Completed Delivery Total |
@@ -54,7 +54,7 @@ Comma-separated list of collectors to use, for example: `--collectors.exchange.e
| `windows_exchange_http_proxy_avg_auth_latency` | Average time spent authenticating CAS requests over the last 200 samples |
| `windows_exchange_http_proxy_outstanding_proxy_requests` | Number of concurrent outstanding proxy requests |
| `windows_exchange_http_proxy_requests_total` | Number of proxy requests processed each second |
| `windows_exchange_avail_service_requests_per_sec` | Number of requests serviced per second |
| `windows_exchange_availability_service_requests_per_sec` | Number of requests serviced per second |
| `windows_exchange_owa_current_unique_users` | Number of unique users currently logged on to Outlook Web App |
| `windows_exchange_owa_requests_total` | Number of requests handled by Outlook Web App per second |
| `windows_exchange_autodiscover_requests_total` | Number of autodiscover service requests processed each second |
@@ -77,4 +77,3 @@ _This collector does not yet have any useful queries added, we would appreciate
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -69,6 +69,23 @@ Show volume usage (%)
100.0 - 100 * (windows_logical_disk_free_bytes{instance="localhost", volume="C:"} / windows_logical_disk_size_bytes{instance="localhost", volume="C:"})
```
Disk Activity
```promql
(
rate(windows_logical_disk_read_seconds_total[2m])
+
rate(windows_logical_disk_write_seconds_total[2m])
)
/
(
rate(windows_logical_disk_read_seconds_total[2m])
+
rate(windows_logical_disk_write_seconds_total[2m])
+
rate(windows_logical_disk_idle_seconds_total[2m])
)
```
## Alerting examples
**prometheus.rules**
```yaml

View File

@@ -1,78 +0,0 @@
# logon collector
The logon collector exposes metrics detailing the active user logon sessions.
| | |
|---------------------|-----------|
| Metric name prefix | `logon` |
| Source | Win32 API |
| Enabled by default? | No |
## Flags
None
## Metrics
| Name | Description | Type | Labels |
|-------------------------------------------|--------------------------------------------|-------|------------------------------------|
| `windows_logon_session_logon_timestamp_seconds` | timestamp of the logon session in seconds. | gauge | `domain`, `id`, `type`, `username` |
### Example metric
Query the total number of interactive logon sessions
```
# HELP windows_logon_session_logon_timestamp_seconds timestamp of the logon session in seconds.
# TYPE windows_logon_session_logon_timestamp_seconds gauge
windows_logon_session_logon_timestamp_seconds{domain="",id="0x0:0x8c54",type="System",username=""} 1.72876928e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x991a",type="Interactive",username="UMFD-1"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x9933",type="Interactive",username="UMFD-0"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x994a",type="Interactive",username="UMFD-0"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x999d",type="Interactive",username="UMFD-1"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf25a",type="Interactive",username="UMFD-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf290",type="Interactive",username="UMFD-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x130241",type="Network",username="vm-jok-dev$"} 1.728769625e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x24f7c9",type="Network",username="vm-jok-dev$"} 1.728770121e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x276846",type="Network",username="vm-jok-dev$"} 1.728770195e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e4",type="Service",username="vm-jok-dev$"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e7",type="System",username="vm-jok-dev$"} 1.728769279e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x71d0f",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x720a3",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x725cb",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x753d8",type="Network",username="vm-jok-dev$"} 1.728769325e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xa3913",type="Network",username="vm-jok-dev$"} 1.728769385e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xbe7f2",type="Network",username="jok"} 1.728769531e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xc76c4",type="RemoteInteractive",username="jok"} 1.728769533e+09
windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e3",type="Service",username="IUSR"} 1.728769295e+09
windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e5",type="Service",username="LOCAL SERVICE"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xae4c7",type="Service",username="MSSQLSERVER"} 1.728769425e+09
windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xb42f1",type="Service",username="SQLTELEMETRY"} 1.728769431e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfbac",type="Interactive",username="DWM-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfc72",type="Interactive",username="DWM-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdedd",type="Interactive",username="DWM-1"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdefd",type="Interactive",username="DWM-1"} 1.728769283e+09
```
### Possible values for `type`
- System
- Interactive
- Network
- Batch
- Service
- Proxy
- Unlock
- NetworkCleartext
- NewCredentials
- RemoteInteractive
- CachedInteractive
- CachedRemoteInteractive
- CachedUnlock
## Useful queries
Query the total number of local and remote (i.e. Terminal Services) interactive sessions.
```
count(windows_logon_logon_type{type=~"Interactive|RemoteInteractive"}) by (type)
```
## Alerting examples
_This collector doesn't yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -21,19 +21,21 @@ If given, a disk needs to *not* match the exclude regexp in order for the corres
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`requests_queued` | Number of requests outstanding on the disk at the time the performance data is collected | gauge | `disk`
`read_bytes_total` | Rate at which bytes are transferred from the disk during read operations | counter | `disk`
`reads_total` | Rate of read operations on the disk | counter | `disk`
`write_bytes_total` | Rate at which bytes are transferred to the disk during write operations | counter | `disk`
`writes_total` | Rate of write operations on the disk | counter | `disk`
`read_seconds_total` | Seconds the disk was busy servicing read requests | counter | `disk`
`write_seconds_total` | Seconds the disk was busy servicing write requests | counter | `disk`
`free_bytes` | Unused space of the disk in bytes (not real time, updates every 10-15 min) | gauge | `disk`
`size_bytes` | Total size of the disk in bytes (not real time, updates every 10-15 min) | gauge | `disk`
`idle_seconds_total` | Seconds the disk was idle (not servicing read/write requests) | counter | `disk`
`split_ios_total` | Number of I/Os to the disk split into multiple I/Os | counter | `disk`
| Name | Description | Type | Labels |
|--------------------------------------------------------|---------------------------------------------------------------------------------------------------------|---------|--------|
| windows_physical_disk_requests_queued | The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength) | Gauge | disk |
| windows_physical_disk_read_bytes_total | The number of bytes transferred from the disk during read operations (PhysicalDisk.DiskReadBytesPerSec) | Counter | disk |
| windows_physical_disk_reads_total | The number of read operations on the disk (PhysicalDisk.DiskReadsPerSec) | Counter | disk |
| windows_physical_disk_write_bytes_total | The number of bytes transferred to the disk during write operations (PhysicalDisk.DiskWriteBytesPerSec) | Counter | disk |
| windows_physical_disk_writes_total | The number of write operations on the disk (PhysicalDisk.DiskWritesPerSec) | Counter | disk |
| windows_physical_disk_read_seconds_total | Seconds that the disk was busy servicing read requests (PhysicalDisk.PercentDiskReadTime) | Counter | disk |
| windows_physical_disk_write_seconds_total | Seconds that the disk was busy servicing write requests (PhysicalDisk.PercentDiskWriteTime) | Counter | disk |
| windows_physical_disk_idle_seconds_total | Seconds that the disk was idle (PhysicalDisk.PercentIdleTime) | Counter | disk |
| windows_physical_disk_split_ios_total | The number of I/Os to the disk that were split into multiple I/Os (PhysicalDisk.SplitIOPerSec) | Counter | disk |
| windows_physical_disk_read_latency_seconds_total | The average time, in seconds, of a read operation from the disk (PhysicalDisk.AvgDiskSecPerRead) | Counter | disk |
| windows_physical_disk_write_latency_seconds_total | The average time, in seconds, of a write operation to the disk (PhysicalDisk.AvgDiskSecPerWrite) | Counter | disk |
| windows_physical_disk_read_write_latency_seconds_total | The time, in seconds, of the average disk transfer (PhysicalDisk.AvgDiskSecPerTransfer) | Counter | disk |
### Warning about size metrics
The `free_bytes` and `size_bytes` metrics are not updated in real time and might have a delay of 10-15min.
@@ -52,29 +54,4 @@ rate(windows_physical_disk_reads_total{instance="localhost", disk=~"0"}[2m]) + r
```
## Alerting examples
**prometheus.rules**
```yaml
groups:
- name: Windows Disk Alerts
rules:
# Sends an alert when disk space usage is above 95%
- alert: DiskSpaceUsage
expr: 100.0 - 100 * (windows_physical_disk_free_bytes / windows_physical_disk_size_bytes) > 95
for: 10m
labels:
severity: high
annotations:
summary: "Disk Space Usage (instance {{ $labels.instance }})"
description: "Disk Space on Drive is used more than 95%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
# Alerts on disks with over 85% space usage predicted to fill within the next four days
- alert: DiskFilling
expr: 100 * (windows_physical_disk_free_bytes / windows_physical_disk_size_bytes) < 15 and predict_linear(windows_physical_disk_free_bytes[6h], 4 * 24 * 3600) < 0
for: 10m
labels:
severity: warning
annotations:
summary: "Disk full in four days (instance {{ $labels.instance }})"
description: "{{ $labels.disk }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
```
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -16,7 +16,7 @@ None
| Name | Description | Type | Labels |
|----------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|--------|
| `windows_system_boot_time_timestamp_seconds` | Unix timestamp of last system boot | gauge | None |
| `windows_system_boot_time_timestamp` | Unix timestamp of last system boot | gauge | None |
| `windows_system_context_switches_total` | Total number of [context switches](https://en.wikipedia.org/wiki/Context_switch) | counter | None |
| `windows_system_exception_dispatches_total` | Total exceptions dispatched by the system | counter | None |
| `windows_system_processes` | Number of process contexts currently loaded or running on the operating system | gauge | None |
@@ -41,7 +41,7 @@ windows_system_processes{instance="localhost"}
## Useful queries
Find hosts that have rebooted in the last 24 hours
```
time() - windows_system_boot_time_timestamp_seconds < 86400
time() - windows_system_boot_time_timestamp < 86400
```
## Alerting examples

View File

@@ -21,16 +21,17 @@ Matching is case-sensitive.
## Metrics
| Name | Description | Type | Labels |
|-----------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------|
| `windows_time_clock_frequency_adjustment_ppb_total` | Total adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | counter | None |
| `windows_time_computed_time_offset_seconds` | The absolute time offset between the system clock and the chosen time source, as computed by the W32Time service in microseconds. When a new valid sample is available, the computed time is updated with the time offset indicated by the sample. This time is the actual time offset of the local clock. W32Time initiates clock correction by using this offset and updates the computed time in between samples with the remaining time offset that needs to be applied to the local clock. Clock accuracy can be tracked by using this performance counter with a low polling interval (for example, 256 seconds or less) and looking for the counter value to be smaller than the desired clock accuracy limit. | gauge | None |
| `windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. | gauge | None |
| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None |
| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None |
| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None |
| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None |
| `windows_time_timezone` | Current timezone as reported by the operating system. | gauge | `timezone` |
| Name | Description | Type | Labels |
|----------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------|
| `windows_time_clock_frequency_adjustment` | Adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | gauge | None |
| `windows_time_clock_frequency_adjustment_ppb` | Adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | gauge | None |
| `windows_time_computed_time_offset_seconds` | The absolute time offset between the system clock and the chosen time source, as computed by the W32Time service in microseconds. When a new valid sample is available, the computed time is updated with the time offset indicated by the sample. This time is the actual time offset of the local clock. W32Time initiates clock correction by using this offset and updates the computed time in between samples with the remaining time offset that needs to be applied to the local clock. Clock accuracy can be tracked by using this performance counter with a low polling interval (for example, 256 seconds or less) and looking for the counter value to be smaller than the desired clock accuracy limit. | gauge | None |
| `windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. | gauge | None |
| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None |
| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None |
| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None |
| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None |
| `windows_time_timezone` | Current timezone as reported by the operating system. | gauge | `timezone` |
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

go.mod
View File

@@ -1,6 +1,8 @@
module github.com/prometheus-community/windows_exporter
go 1.23
go 1.23.0
toolchain go1.23.4
require (
github.com/Microsoft/hcsshim v0.12.9
@@ -8,13 +10,12 @@ require (
github.com/bmatcuk/doublestar/v4 v4.8.1
github.com/dimchansky/utfbom v1.1.1
github.com/go-ole/go-ole v1.3.0
github.com/google/uuid v1.6.0
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/client_golang v1.21.1
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.61.0
github.com/prometheus/exporter-toolkit v0.13.2
github.com/prometheus/common v0.62.0
github.com/prometheus/exporter-toolkit v0.14.0
github.com/stretchr/testify v1.10.0
golang.org/x/sys v0.29.0
golang.org/x/sys v0.31.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -23,7 +24,7 @@ require (
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/cgroups/v3 v3.0.4 // indirect
github.com/containerd/cgroups/v3 v3.0.5 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/typeurl/v2 v2.2.3 // indirect
@@ -32,7 +33,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mdlayher/socket v0.5.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -43,13 +44,13 @@ require (
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.24.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/text v0.21.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect
google.golang.org/grpc v1.68.0 // indirect
google.golang.org/protobuf v1.35.2 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/net v0.37.0 // indirect
golang.org/x/oauth2 v0.28.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/text v0.23.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/grpc v1.71.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
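The new `toolchain go1.23.4` directive pins the Go toolchain used to build the module: with Go 1.21 or newer, the `go` command automatically downloads and switches to go1.23.4 if the locally installed toolchain is older, while the bumped `go 1.23.0` line remains the minimum language version. The directive can be maintained with `go mod edit -toolchain=go1.23.4`.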

go.sum (72 changed lines)

@@ -17,8 +17,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4=
github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@@ -66,14 +66,14 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -92,15 +92,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ=
github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -128,8 +128,10 @@ go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -145,30 +147,40 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -186,15 +198,19 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 h1:LWZqQOEjDyONlF1H6afSWpAL/znlREo2tHfLoe+8LMA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489 h1:5bKytslY8ViY0Cj/ewmRtrWHW64bNF03cAatUUFCdFI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -204,8 +220,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

installer/codesign.cer (new binary file; contents not shown)

View File

@@ -44,6 +44,12 @@
<ServiceDependency Id="wmiApSrv" />
</ServiceInstall>
<ServiceControl Id="ServiceStateControl" Name="windows_exporter" Remove="uninstall" Start="install" Stop="both"/>
<!-- The "Name" field must match the argument to eventlog.Open() -->
<util:EventSource Log="Application" Name="windows_exporter"
EventMessageFile="%SystemRoot%\System32\EventCreate.exe"
SupportsErrors="yes"
SupportsInformationals="yes"
SupportsWarnings="yes"/>
</Component>
<Component Id="CreateTextfileDirectory" Directory="textfile_inputs" Guid="d03ef58a-9cbf-4165-ad39-d143e9b27e14">
<CreateFolder />
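The comment in this hunk couples the installer to the service code: the event source registered here must be opened under the same name at runtime. A minimal sketch of that Go side, assuming the standard `golang.org/x/sys/windows/svc/eventlog` package (the exporter's actual logging wiring may differ):

```go
package main

import "golang.org/x/sys/windows/svc/eventlog"

func main() {
	// The source name must match the Name attribute of <util:EventSource>,
	// otherwise Windows cannot resolve the registered message file.
	elog, err := eventlog.Open("windows_exporter")
	if err != nil {
		panic(err)
	}
	defer elog.Close()

	// Event ID 1, informational severity.
	_ = elog.Info(1, "windows_exporter service started")
}
```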

View File

@@ -130,13 +130,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
}
c.addressBookOperationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"),
"",
@@ -511,6 +504,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
}
return nil
}
@@ -520,6 +520,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(
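This hunk, and the matching ones in the collectors below, make the same two changes: the fallible `pdh.NewCollector` call moves to the end of `Build`, after every `prometheus.Desc` has been constructed, and `Collect` now treats an empty PDH result set as an error instead of silently exporting nothing. A condensed sketch of the resulting shape, reusing the exporter's internal `pdh` and `types` packages (so it compiles only inside this module):

```go
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	// ... build every prometheus.Desc first ...

	// Creating the PDH collector is the last, fallible step of Build.
	var err error
	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
	}

	return nil
}

func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	if err := c.perfDataCollector.Collect(&c.perfDataObject); err != nil {
		return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", err)
	} else if len(c.perfDataObject) == 0 {
		// New guard: surface an empty result set as an explicit error.
		return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", types.ErrNoDataUnexpected)
	}

	// ... emit metrics from c.perfDataObject ...
	return nil
}
```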

View File

@@ -82,13 +82,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Certification Authority collector: %w", err)
}
c.requestsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
"Total certificate requests processed",
@@ -168,6 +161,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Certification Authority collector: %w", err)
}
return nil
}

View File

@@ -112,13 +112,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil)
if err != nil {
return fmt.Errorf("failed to create AD FS collector: %w", err)
}
c.adLoginConnectionFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
"Total number of connection failures to an Active Directory domain controller",
@@ -378,6 +371,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil)
if err != nil {
return fmt.Errorf("failed to create AD FS collector: %w", err)
}
return nil
}
@@ -385,6 +385,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect ADFS metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect ADFS metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(

View File

@@ -98,13 +98,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Cache collector: %w", err)
}
c.asyncCopyReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
"(AsyncCopyReadsTotal)",
@@ -280,6 +273,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Cache collector: %w", err)
}
return nil
}
@@ -288,6 +288,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Cache metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect Cache metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(

View File

@@ -89,15 +89,8 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.mu = sync.Mutex{}
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err)
}
c.logicalProcessors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "logical_processor"),
"Total number of logical processors",
@@ -186,6 +179,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
c.processorRTCValues = map[string]utils.Counter{}
c.processorMPerfValues = map[string]utils.Counter{}
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err)
}
return nil
}

View File

@@ -75,18 +75,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if miSession == nil {
return errors.New("miSession is nil")
}
miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQuery = miQuery
c.miSession = miSession
c.cpuInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, "", Name),
"Labelled CPU information as provided by Win32_Processor",
@@ -148,6 +136,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
nil,
)
if miSession == nil {
return errors.New("miSession is nil")
}
miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQuery = miQuery
c.miSession = miSession
var dst []miProcessor
if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)

View File

@@ -37,7 +37,7 @@ type Collector struct {
config Config
// physicalMemoryBytes
// Deprecated: Use windows_physical_memory_total_bytes instead
// Deprecated: Use windows_memory_physical_total_bytes instead
physicalMemoryBytes *prometheus.Desc
// logicalProcessors
// Deprecated: Use windows_cpu_logical_processor instead
@@ -85,7 +85,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
)
c.physicalMemoryBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "physical_memory_bytes"),
"Deprecated: Use windows_physical_memory_total_bytes instead",
"Deprecated: Use windows_memory_physical_total_bytes instead",
nil,
nil,
)

View File

@@ -160,29 +160,6 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger.Info("dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
var err error
if slices.Contains(c.config.CollectorsEnabled, "connection") {
c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "folder") {
c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
}
}
// connection
c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
@@ -473,13 +450,36 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
if slices.Contains(c.config.CollectorsEnabled, "connection") {
c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "folder") {
c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
}
}
return nil
}
// Collect implements the Collector interface.
// Sends metric values for each metric to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 3)
errs := make([]error, 0)
if slices.Contains(c.config.CollectorsEnabled, "connection") {
errs = append(errs, c.collectPDHConnection(ch))

View File

@@ -148,12 +148,79 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) {
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil)
if err != nil {
return fmt.Errorf("failed to create DHCP Server collector: %w", err)
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) {
c.scopeInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_info"),
"DHCP Scope information",
[]string{"name", "superscope_name", "superscope_id", "scope"},
nil,
)
c.scopeState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_state"),
"DHCP Scope state",
[]string{"scope", "state"},
nil,
)
c.scopeAddressesFreeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free"),
"DHCP Scope free addresses",
[]string{"scope"},
nil,
)
c.scopeAddressesFreeOnPartnerServerTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_partner_server"),
"DHCP Scope free addresses on partner server",
[]string{"scope"},
nil,
)
c.scopeAddressesFreeOnThisServerTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_this_server"),
"DHCP Scope free addresses on this server",
[]string{"scope"},
nil,
)
c.scopeAddressesInUseTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use"),
"DHCP Scope addresses in use",
[]string{"scope"},
nil,
)
c.scopeAddressesInUseOnPartnerServerTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_partner_server"),
"DHCP Scope addresses in use on partner server",
[]string{"scope"},
nil,
)
c.scopeAddressesInUseOnThisServerTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_this_server"),
"DHCP Scope addresses in use on this server",
[]string{"scope"},
nil,
)
c.scopePendingOffersTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_pending_offers"),
"DHCP Scope pending offers",
[]string{"scope"},
nil,
)
c.scopeReservedAddressTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_reserved_address"),
"DHCP Scope reserved addresses",
[]string{"scope"},
nil,
)
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) {
c.packetsReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
"Total number of packets received by the DHCP server (PacketsReceivedTotal)",
@@ -304,78 +371,11 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
nil,
)
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) {
c.scopeInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_info"),
"DHCP Scope information",
[]string{"name", "superscope_name", "superscope_id", "scope"},
nil,
)
c.scopeState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_state"),
"DHCP Scope state",
[]string{"scope", "state"},
nil,
)
c.scopeAddressesFreeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free"),
"DHCP Scope free addresses",
[]string{"scope"},
nil,
)
c.scopeAddressesFreeOnPartnerServerTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_partner_server"),
"DHCP Scope free addresses on partner server",
[]string{"scope"},
nil,
)
c.scopeAddressesFreeOnThisServerTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_this_server"),
"DHCP Scope free addresses on this server",
[]string{"scope"},
nil,
)
c.scopeAddressesInUseTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use"),
"DHCP Scope addresses in use",
[]string{"scope"},
nil,
)
c.scopeAddressesInUseOnPartnerServerTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_partner_server"),
"DHCP Scope addresses in use on partner server",
[]string{"scope"},
nil,
)
c.scopeAddressesInUseOnThisServerTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_this_server"),
"DHCP Scope addresses in use on this server",
[]string{"scope"},
nil,
)
c.scopePendingOffersTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_pending_offers"),
"DHCP Scope pending offers",
[]string{"scope"},
nil,
)
c.scopeReservedAddressTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "scope_reserved_address"),
"DHCP Scope reserved addresses",
[]string{"scope"},
nil,
)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil)
if err != nil {
return fmt.Errorf("failed to create DHCP Server collector: %w", err)
}
}
return nil
@@ -403,6 +403,8 @@ func (c *Collector) collectServerMetrics(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect DHCP Server metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect DHCP Server metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(

View File

@@ -72,18 +72,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if miSession == nil {
return errors.New("miSession is nil")
}
miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQuery = miQuery
c.miSession = miSession
c.diskInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"General drive information",
@@ -120,6 +108,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
nil,
)
if miSession == nil {
return errors.New("miSession is nil")
}
miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQuery = miQuery
c.miSession = miSession
var dst []diskDrive
if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)

View File

@@ -16,8 +16,11 @@
package dns
import (
"errors"
"fmt"
"log/slog"
"slices"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
@@ -26,12 +29,23 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
const Name = "dns"
const (
Name = "dns"
subCollectorMetrics = "metrics"
subCollectorWMIStats = "wmi_stats"
)
type Config struct{}
type Config struct {
CollectorsEnabled []string `yaml:"collectors_enabled"`
}
//nolint:gochecknoglobals
var ConfigDefaults = Config{}
var ConfigDefaults = Config{
CollectorsEnabled: []string{
subCollectorMetrics,
subCollectorWMIStats,
},
}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.
type Collector struct {
@@ -40,6 +54,9 @@ type Collector struct {
perfDataCollector *pdh.Collector
perfDataObject []perfDataCounterValues
miSession *mi.Session
miQuery mi.Query
dynamicUpdatesFailures *prometheus.Desc
dynamicUpdatesQueued *prometheus.Desc
dynamicUpdatesReceived *prometheus.Desc
@@ -62,6 +79,7 @@ type Collector struct {
zoneTransferResponsesReceived *prometheus.Desc
zoneTransferSuccessReceived *prometheus.Desc
zoneTransferSuccessSent *prometheus.Desc
dnsWMIStats *prometheus.Desc
}
func New(config *Config) *Collector {
@@ -69,6 +87,10 @@ func New(config *Config) *Collector {
config = &ConfigDefaults
}
if config.CollectorsEnabled == nil {
config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
}
c := &Collector{
config: *config,
}
@@ -76,8 +98,26 @@ func New(config *Config) *Collector {
return c
}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
c.config.CollectorsEnabled = make([]string, 0)
var collectorsEnabled string
app.Flag(
"collector.dns.enabled",
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
app.Action(func(*kingpin.ParseContext) error {
c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")
return nil
})
return c
}
func (c *Collector) GetName() string {
@@ -90,14 +130,31 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DNS collector: %w", err)
func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
for _, collector := range c.config.CollectorsEnabled {
if !slices.Contains([]string{subCollectorMetrics, subCollectorWMIStats}, collector) {
return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
strings.Join([]string{subCollectorMetrics, subCollectorWMIStats}, ", "),
)
}
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
if err := c.buildMetricsCollector(); err != nil {
return err
}
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorWMIStats) {
if err := c.buildErrorStatsCollector(miSession); err != nil {
return err
}
}
return nil
}
func (c *Collector) buildMetricsCollector() error {
c.zoneTransferRequestsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
"Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
@@ -231,15 +288,65 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
c.dnsWMIStats = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "wmi_stats_total"),
"DNS WMI statistics from MicrosoftDNS_Statistic",
[]string{"name", "collection_name", "dns_server"},
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DNS collector: %w", err)
}
return nil
}
func (c *Collector) buildErrorStatsCollector(miSession *mi.Session) error {
if miSession == nil {
return errors.New("miSession is nil")
}
query, err := mi.NewQuery("SELECT Name, CollectionName, Value, DnsServerName FROM MicrosoftDNS_Statistic WHERE CollectionName = 'Error Stats'")
if err != nil {
return fmt.Errorf("failed to create query: %w", err)
}
c.miSession = miSession
c.miQuery = query
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0)
if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
if err := c.collectMetrics(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting metrics: %w", err))
}
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorWMIStats) {
if err := c.collectErrorStats(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting WMI statistics: %w", err))
}
}
return errors.Join(errs...)
}
func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect DNS metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect DNS metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(
@@ -493,3 +600,24 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
return nil
}
func (c *Collector) collectErrorStats(ch chan<- prometheus.Metric) error {
var stats []Statistic
if err := c.miSession.Query(&stats, mi.NamespaceRootMicrosoftDNS, c.miQuery); err != nil {
return fmt.Errorf("failed to query DNS statistics: %w", err)
}
// Collect DNS error statistics
for _, stat := range stats {
ch <- prometheus.MustNewConstMetric(
c.dnsWMIStats,
prometheus.CounterValue,
float64(stat.Value),
stat.Name,
stat.CollectionName,
stat.DnsServerName,
)
}
return nil
}
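The new `--collector.dns.enabled` flag follows the exporter's usual sub-collector pattern: a comma-separated kingpin flag whose value is split in a parse action. A self-contained sketch of just that pattern, outside the exporter:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/alecthomas/kingpin/v2"
)

func main() {
	app := kingpin.New("sketch", "sub-collector flag pattern")

	var raw string

	var enabled []string

	app.Flag(
		"collector.dns.enabled",
		"Comma-separated list of collectors to use. Defaults to all, if not specified.",
	).Default("metrics,wmi_stats").StringVar(&raw)

	// The split runs after parsing, so the default and any
	// user-supplied value take the same code path.
	app.Action(func(*kingpin.ParseContext) error {
		enabled = strings.Split(raw, ",")

		return nil
	})

	kingpin.MustParse(app.Parse([]string{"--collector.dns.enabled=metrics"}))
	fmt.Println(enabled) // [metrics]
}
```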

View File

@@ -105,3 +105,11 @@ type perfDataCounterValues struct {
_ float64 `perfdata:"Zone Transfer SOA Request Sent"`
_ float64 `perfdata:"Zone Transfer Success"`
}
// Statistic represents the structure for DNS error statistics
type Statistic struct {
Name string `mi:"Name"`
CollectionName string `mi:"CollectionName"`
Value uint64 `mi:"Value"`
DnsServerName string `mi:"DnsServerName"`
}

View File

@@ -37,7 +37,7 @@ type perfDataCounterValuesAutoDiscover struct {
func (c *Collector) buildAutoDiscover() error {
var err error
c.perfDataCollectorAutoDiscover, err = pdh.NewCollector[perfDataCounterValuesAutoDiscover](pdh.CounterTypeRaw, "MSExchange Autodiscover", pdh.InstancesAll)
c.perfDataCollectorAutoDiscover, err = pdh.NewCollector[perfDataCounterValuesAutoDiscover](pdh.CounterTypeRaw, "MSExchangeAutodiscover", nil)
if err != nil {
return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
}

View File

@@ -31,7 +31,7 @@ type collectorAvailabilityService struct {
}
type perfDataCounterValuesAvailabilityService struct {
RequestsPerSec float64 `perfdata:"Requests/sec"`
AvailabilityRequestsPerSec float64 `perfdata:"Availability Requests (sec)"`
}
func (c *Collector) buildAvailabilityService() error {
@@ -43,7 +43,7 @@ func (c *Collector) buildAvailabilityService() error {
}
c.availabilityRequestsSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "avail_service_requests_per_sec"),
prometheus.BuildFQName(types.Namespace, Name, "availability_service_requests_per_sec"),
"Number of requests serviced per second",
nil,
nil,
@@ -62,7 +62,7 @@ func (c *Collector) collectAvailabilityService(ch chan<- prometheus.Metric) erro
ch <- prometheus.MustNewConstMetric(
c.availabilityRequestsSec,
prometheus.CounterValue,
data.RequestsPerSec,
data.AvailabilityRequestsPerSec,
)
}

View File

@@ -39,7 +39,7 @@ type collectorTransportQueues struct {
messagesSubmittedTotal *prometheus.Desc
messagesDelayedTotal *prometheus.Desc
messagesCompletedDeliveryTotal *prometheus.Desc
shadowQueueLength *prometheus.Desc
aggregateShadowQueueLength *prometheus.Desc
submissionQueueLength *prometheus.Desc
delayQueueLength *prometheus.Desc
itemsCompletedDeliveryTotal *prometheus.Desc
@@ -63,7 +63,7 @@ type perfDataCounterValuesTransportQueues struct {
MessagesSubmittedTotal float64 `perfdata:"Messages Submitted Total"`
MessagesDelayedTotal float64 `perfdata:"Messages Delayed Total"`
MessagesCompletedDeliveryTotal float64 `perfdata:"Messages Completed Delivery Total"`
ShadowQueueLength float64 `perfdata:"Shadow Queue Length"`
AggregateShadowQueueLength float64 `perfdata:"Aggregate Shadow Queue Length"`
SubmissionQueueLength float64 `perfdata:"Submission Queue Length"`
DelayQueueLength float64 `perfdata:"Delay Queue Length"`
ItemsCompletedDeliveryTotal float64 `perfdata:"Items Completed Delivery Total"`
@@ -152,9 +152,9 @@ func (c *Collector) buildTransportQueues() error {
[]string{"name"},
nil,
)
c.shadowQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transport_queues_shadow_queue_length"),
"Shadow Queue Length",
c.aggregateShadowQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transport_queues_aggregate_shadow_queue_length"),
"The current number of messages in shadow queues.",
[]string{"name"},
nil,
)
@@ -280,9 +280,9 @@ func (c *Collector) collectTransportQueues(ch chan<- prometheus.Metric) error {
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.shadowQueueLength,
c.aggregateShadowQueueLength,
prometheus.GaugeValue,
data.ShadowQueueLength,
data.AggregateShadowQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(

View File

@@ -23,11 +23,10 @@ import (
"strings"
"sync"
"github.com/Microsoft/hcsshim/osversion"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
)
const (
@@ -149,7 +148,7 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.collectorFns = make([]func(ch chan<- prometheus.Metric) error, 0, len(c.config.CollectorsEnabled))
c.closeFns = make([]func(), 0, len(c.config.CollectorsEnabled))
@@ -157,19 +156,17 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
return nil
}
version := windows.RtlGetVersion()
subCollectors := map[string]struct {
build func() error
collect func(ch chan<- prometheus.Metric) error
close func()
minBuildNumber uint32
minBuildNumber uint16
}{
subCollectorDataStore: {
build: c.buildDataStore,
collect: c.collectDataStore,
close: c.perfDataCollectorDataStore.Close,
minBuildNumber: types.BuildNumberWindowsServer2022,
minBuildNumber: osversion.LTSC2022,
},
subCollectorDynamicMemoryBalancer: {
build: c.buildDynamicMemoryBalancer,
@@ -227,9 +224,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
close: c.perfDataCollectorVirtualNetworkAdapterDropReasons.Close,
},
subCollectorVirtualSMB: {
build: c.buildVirtualSMB,
collect: c.collectVirtualSMB,
close: c.perfDataCollectorVirtualSMB.Close,
build: c.buildVirtualSMB,
collect: c.collectVirtualSMB,
close: c.perfDataCollectorVirtualSMB.Close,
minBuildNumber: osversion.LTSC2022,
},
subCollectorVirtualStorageDevice: {
build: c.buildVirtualStorageDevice,
@@ -243,6 +241,8 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
},
}
buildNumber := osversion.Build()
// The result must be sorted to prevent test failures.
sort.Strings(c.config.CollectorsEnabled)
@@ -253,8 +253,11 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
return fmt.Errorf("unknown collector: %s", name)
}
if version.BuildNumber < subCollectors[name].minBuildNumber {
errs = append(errs, fmt.Errorf("collector %s requires Windows Server 2022 or newer", name))
if buildNumber < subCollectors[name].minBuildNumber {
logger.Warn(fmt.Sprintf(
"collector %s requires windows build version %d. Current build version: %d",
name, subCollectors[name].minBuildNumber, buildNumber,
), slog.String("collector", name))
continue
}
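Two related changes here: the gate now reads the build number from hcsshim's `osversion` package (whose `Build()` returns a `uint16`, hence the narrowed `minBuildNumber` type), and a sub-collector that needs a newer build is now skipped with a warning instead of failing the whole `Build`. A tiny standalone sketch of the check (Windows-only, since `osversion.Build()` reads the host's version):

```go
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

func main() {
	build := osversion.Build() // e.g. 20348 on Windows Server 2022 (LTSC2022)
	if build < osversion.LTSC2022 {
		// Mirror the new behaviour: warn and skip instead of erroring out.
		fmt.Printf("collector datastore requires windows build version %d. Current build version: %d\n",
			osversion.LTSC2022, build)

		return
	}

	fmt.Println("datastore sub-collector enabled")
}
```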

View File

@@ -18,6 +18,7 @@ package hyperv
import (
"fmt"
"github.com/Microsoft/hcsshim/osversion"
"github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
@@ -40,7 +41,7 @@ type perfDataCounterValuesDynamicMemoryBalancer struct {
// Hyper-V Dynamic Memory Balancer metrics
VmDynamicMemoryBalancerAvailableMemory float64 `perfdata:"Available Memory"`
VmDynamicMemoryBalancerAvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing"`
VmDynamicMemoryBalancerAvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing" perfdata_min_build:"17763"`
VmDynamicMemoryBalancerAveragePressure float64 `perfdata:"Average Pressure"`
VmDynamicMemoryBalancerSystemCurrentPressure float64 `perfdata:"System Current Pressure"`
}
@@ -96,12 +97,14 @@ func (c *Collector) collectDynamicMemoryBalancer(ch chan<- prometheus.Metric) er
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmDynamicMemoryBalancerAvailableMemoryForBalancing,
prometheus.GaugeValue,
utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemoryForBalancing),
data.Name,
)
if osversion.Build() >= osversion.LTSC2019 {
ch <- prometheus.MustNewConstMetric(
c.vmDynamicMemoryBalancerAvailableMemoryForBalancing,
prometheus.GaugeValue,
utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemoryForBalancing),
data.Name,
)
}
ch <- prometheus.MustNewConstMetric(
c.vmDynamicMemoryBalancerAveragePressure,
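Two guards cooperate in this hunk: the new `perfdata_min_build:"17763"` struct tag (17763 is the Windows Server 2019 / LTSC2019 build) presumably tells the internal pdh layer not to request that counter on older builds, while the `osversion.Build() >= osversion.LTSC2019` check keeps `Collect` from emitting a sample that was never read there.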

View File

@@ -18,6 +18,7 @@ package hyperv
import (
"fmt"
"github.com/Microsoft/hcsshim/osversion"
"github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
@@ -47,7 +48,7 @@ type perfDataCounterValuesDynamicMemoryVM struct {
// Hyper-V Dynamic Memory VM metrics
VmMemoryAddedMemory float64 `perfdata:"Added Memory"`
VmMemoryCurrentPressure float64 `perfdata:"Current Pressure"`
VmMemoryGuestAvailableMemory float64 `perfdata:"Guest Available Memory"`
VmMemoryGuestAvailableMemory float64 `perfdata:"Guest Available Memory" perfdata_min_build:"17763"`
VmMemoryGuestVisiblePhysicalMemory float64 `perfdata:"Guest Visible Physical Memory"`
VmMemoryMaximumPressure float64 `perfdata:"Maximum Pressure"`
VmMemoryMemoryAddOperations float64 `perfdata:"Memory Add Operations"`
@@ -150,12 +151,14 @@ func (c *Collector) collectDynamicMemoryVM(ch chan<- prometheus.Metric) error {
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.vmMemoryGuestAvailableMemory,
prometheus.GaugeValue,
utils.MBToBytes(data.VmMemoryGuestAvailableMemory),
data.Name,
)
if osversion.Build() >= osversion.LTSC2019 {
ch <- prometheus.MustNewConstMetric(
c.vmMemoryGuestAvailableMemory,
prometheus.GaugeValue,
utils.MBToBytes(data.VmMemoryGuestAvailableMemory),
data.Name,
)
}
ch <- prometheus.MustNewConstMetric(
c.vmMemoryGuestVisiblePhysicalMemory,

View File

@@ -40,7 +40,7 @@ type collectorHypervisorVirtualProcessor struct {
type perfDataCounterValuesHypervisorVirtualProcessor struct {
Name string
HypervisorVirtualProcessorGuestIdleTimePercent float64 `perfdata:"% Guest Idle Time"`
HypervisorVirtualProcessorGuestRunTimePercent float64 `perfdata:"% Guest Run Time"`
HypervisorVirtualProcessorHypervisorRunTimePercent float64 `perfdata:"% Hypervisor Run Time"`
HypervisorVirtualProcessorTotalRunTimePercent float64 `perfdata:"% Total Run Time"`
HypervisorVirtualProcessorRemoteRunTimePercent float64 `perfdata:"% Remote Run Time"`
@@ -108,15 +108,15 @@ func (c *Collector) collectHypervisorVirtualProcessor(ch chan<- prometheus.Metri
ch <- prometheus.MustNewConstMetric(
c.hypervisorVirtualProcessorTimeTotal,
prometheus.CounterValue,
data.HypervisorVirtualProcessorGuestIdleTimePercent,
vmName, coreID, "guest_idle",
data.HypervisorVirtualProcessorGuestRunTimePercent,
vmName, coreID, "guest",
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorVirtualProcessorTimeTotal,
prometheus.CounterValue,
data.HypervisorVirtualProcessorGuestIdleTimePercent,
vmName, coreID, "guest_idle",
data.HypervisorVirtualProcessorRemoteRunTimePercent,
vmName, coreID, "remote",
)
ch <- prometheus.MustNewConstMetric(

View File

@@ -167,7 +167,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
prometheus.Labels{"version": fmt.Sprintf("%d.%d", c.iisVersion.major, c.iisVersion.minor)},
)
errs := make([]error, 0, 4)
errs := make([]error, 0)
if err := c.buildWebService(); err != nil {
errs = append(errs, fmt.Errorf("failed to build Web Service collector: %w", err))
@@ -247,7 +247,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
1,
)
errs := make([]error, 0, 4)
errs := make([]error, 0)
if err := c.collectWebService(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect Web Service metrics: %w", err))

View File

@@ -150,13 +150,6 @@ func (c *Collector) Close() error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
}
c.information = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"A metric with a constant '1' value labeled with logical disk information",
@@ -281,6 +274,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
}
return nil
}
@@ -588,6 +588,11 @@ func getAllMountedVolumes() (map[string]string, error) {
break
}
if errors.Is(err, windows.ERROR_FILE_NOT_FOUND) {
// the volume is not mounted
break
}
if errors.Is(err, windows.ERROR_NO_MORE_FILES) {
rootPathBuf = make([]uint16, (rootPathLen+1)/2)

View File

@@ -34,6 +34,7 @@ type Config struct{}
var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI metrics.
// Deprecated: Use windows_terminal_services_session_info instead.
type Collector struct {
config Config
@@ -64,10 +65,16 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger.Warn("The logon collector will be removed mid 2025. "+
"See https://github.com/prometheus-community/windows_exporter/pull/1957 for more information. If you see values in this collector"+
" that you need, please open an issue to discuss how to get them into the new collector.",
slog.String("collector", Name),
)
c.sessionInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "session_logon_timestamp_seconds"),
"timestamp of the logon session in seconds.",
"Deprecated. Use windows_terminal_services_session_info instead.",
[]string{"id", "username", "domain", "type"},
nil,
)

View File

@@ -110,13 +110,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Memory collector: %w", err)
}
c.availableBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "available_bytes"),
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
@@ -340,13 +333,20 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Memory collector: %w", err)
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
errs := make([]error, 0)
if err := c.collectPDH(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting memory metrics: %w", err))
@@ -390,6 +390,8 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Memory metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect Memory metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(

View File

@@ -122,7 +122,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
c.miSession = miSession
errs := make([]error, 0, 5)
errs := make([]error, 0)
if slices.Contains(c.config.CollectorsEnabled, subCollectorCluster) {
if err := c.buildCluster(); err != nil {
@@ -227,7 +227,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
wg.Wait()
close(errCh)
errs := make([]error, 0, 5)
errs := make([]error, 0)
for err := range errCh {
errs = append(errs, err)

View File

@@ -18,6 +18,7 @@ package mscluster
import (
"fmt"
"github.com/Microsoft/hcsshim/osversion"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
@@ -192,7 +193,14 @@ type msClusterCluster struct {
}
func (c *Collector) buildCluster() error {
clusterMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_Cluster")
buildNumber := osversion.Build()
wmiSelect := "AddEvictDelay,AdminAccessPoint,AutoAssignNodeSite,AutoBalancerLevel,AutoBalancerMode,BackupInProgress,BlockCacheSize,ClusSvcHangTimeout,ClusSvcRegroupOpeningTimeout,ClusSvcRegroupPruningTimeout,ClusSvcRegroupStageTimeout,ClusSvcRegroupTickInMilliseconds,ClusterEnforcedAntiAffinity,ClusterFunctionalLevel,ClusterGroupWaitDelay,ClusterLogLevel,ClusterLogSize,ClusterUpgradeVersion,CrossSiteDelay,CrossSiteThreshold,CrossSubnetDelay,CrossSubnetThreshold,CsvBalancer,DatabaseReadWriteMode,DefaultNetworkRole,DisableGroupPreferredOwnerRandomization,DrainOnShutdown,DynamicQuorumEnabled,EnableSharedVolumes,FixQuorum,GracePeriodEnabled,GracePeriodTimeout,GroupDependencyTimeout,HangRecoveryAction,IgnorePersistentStateOnStartup,LogResourceControls,LowerQuorumPriorityNodeId,MessageBufferLength,MinimumNeverPreemptPriority,MinimumPreemptorPriority,NetftIPSecEnabled,PlacementOptions,PlumbAllCrossSubnetRoutes,PreventQuorum,QuarantineDuration,QuarantineThreshold,QuorumArbitrationTimeMax,QuorumArbitrationTimeMin,QuorumLogFileSize,QuorumTypeValue,RequestReplyTimeout,ResiliencyDefaultPeriod,ResiliencyLevel,ResourceDllDeadlockPeriod,RootMemoryReserved,RouteHistoryLength,S2DBusTypes,S2DCacheDesiredState,S2DCacheFlashReservePercent,S2DCachePageSizeKBytes,S2DEnabled,S2DIOLatencyThreshold,S2DOptimizations,SameSubnetDelay,SameSubnetThreshold,SecurityLevel,SharedVolumeVssWriterOperationTimeout,ShutdownTimeoutInMinutes,UseClientAccessNetworksForSharedVolumes,WitnessDatabaseWriteTimeout,WitnessDynamicWeight,WitnessRestartInterval"
if buildNumber >= osversion.LTSC2022 {
wmiSelect += ",DetectManagedEvents,SecurityLevelForStorage,MaxNumberOfNodes,DetectManagedEventsThreshold,DetectedCloudPlatform"
}
clusterMIQuery, err := mi.NewQuery(fmt.Sprintf("SELECT %s FROM MSCluster_Cluster", wmiSelect))
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
@@ -852,27 +860,6 @@ func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error {
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterDetectedCloudPlatform,
prometheus.GaugeValue,
float64(v.DetectedCloudPlatform),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterDetectManagedEvents,
prometheus.GaugeValue,
float64(v.DetectManagedEvents),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterDetectManagedEventsThreshold,
prometheus.GaugeValue,
float64(v.DetectManagedEventsThreshold),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterDisableGroupPreferredOwnerRandomization,
prometheus.GaugeValue,
@@ -957,13 +944,6 @@ func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error {
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterMaxNumberOfNodes,
prometheus.GaugeValue,
float64(v.MaxNumberOfNodes),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterMessageBufferLength,
prometheus.GaugeValue,
@@ -1167,13 +1147,6 @@ func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error {
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterSecurityLevelForStorage,
prometheus.GaugeValue,
float64(v.SecurityLevelForStorage),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterSharedVolumeVssWriterOperationTimeout,
prometheus.GaugeValue,
@@ -1215,6 +1188,43 @@ func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error {
float64(v.WitnessRestartInterval),
v.Name,
)
if osversion.Build() >= osversion.LTSC2022 {
ch <- prometheus.MustNewConstMetric(
c.clusterDetectManagedEvents,
prometheus.GaugeValue,
float64(v.DetectManagedEvents),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterDetectManagedEventsThreshold,
prometheus.GaugeValue,
float64(v.DetectManagedEventsThreshold),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterSecurityLevelForStorage,
prometheus.GaugeValue,
float64(v.SecurityLevelForStorage),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterMaxNumberOfNodes,
prometheus.GaugeValue,
float64(v.MaxNumberOfNodes),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.clusterDetectedCloudPlatform,
prometheus.GaugeValue,
float64(v.DetectedCloudPlatform),
v.Name,
)
}
}
return nil
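Note: buildCluster now queries only the MSCluster_Cluster properties the running OS actually has. The five properties introduced with Windows Server 2022 are appended to the SELECT list — and their metrics emitted — only when osversion.Build() reports LTSC2022 or newer; selecting them on older builds would fail the whole query. A reduced sketch of the gating, with a truncated field list and a plain string in place of mi.NewQuery:

package main

import (
	"fmt"
	"strings"
)

// ltsc2022 mirrors osversion.LTSC2022 (build 20348, Windows Server 2022).
const ltsc2022 = 20348

// clusterQuery stands in for the mi.NewQuery call in buildCluster.
func clusterQuery(buildNumber uint16) string {
	fields := []string{"AddEvictDelay", "AdminAccessPoint", "AutoAssignNodeSite"} // truncated list

	if buildNumber >= ltsc2022 {
		// These properties do not exist before Server 2022.
		fields = append(fields, "DetectManagedEvents", "SecurityLevelForStorage",
			"MaxNumberOfNodes", "DetectManagedEventsThreshold", "DetectedCloudPlatform")
	}

	return fmt.Sprintf("SELECT %s FROM MSCluster_Cluster", strings.Join(fields, ","))
}

func main() {
	fmt.Println(clusterQuery(17763)) // Server 2019: base fields only
	fmt.Println(clusterQuery(20348)) // Server 2022: gated fields included
}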

View File

@@ -48,7 +48,7 @@ type msClusterNetwork struct {
}
func (c *Collector) buildNetwork() error {
networkMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_Network")
networkMIQuery, err := mi.NewQuery("SELECT Characteristics,Flags,Metric,Role,State FROM MSCluster_Network")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
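Note: the same narrowing from SELECT * to an explicit property list is applied below to MSCluster_Node, MSCluster_Resource, and MSCluster_ResourceGroup: each query names exactly the properties the collector decodes, so properties the collector never reads — or that only exist on newer OS builds — can neither break nor inflate the query. The Node hunk additionally combines this with the same osversion.Build() gate sketched above for the cluster query, so no separate sketch is needed for those hunks.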

View File

@@ -18,6 +18,7 @@ package mscluster
import (
"fmt"
"github.com/Microsoft/hcsshim/osversion"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
@@ -66,7 +67,14 @@ type msClusterNode struct {
}
func (c *Collector) buildNode() error {
nodeMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_Node")
buildNumber := osversion.Build()
wmiSelect := "BuildNumber,Characteristics,DynamicWeight,Flags,MajorVersion,MinorVersion,NeedsPreventQuorum,NodeDrainStatus,NodeHighestVersion,NodeLowestVersion,NodeWeight,State,StatusInformation"
if buildNumber >= osversion.LTSC2022 {
wmiSelect += ",DetectedCloudPlatform"
}
nodeMIQuery, err := mi.NewQuery(fmt.Sprintf("SELECT %s FROM MSCluster_Node", wmiSelect))
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}

View File

@@ -74,7 +74,7 @@ type msClusterResource struct {
}
func (c *Collector) buildResource() error {
resourceMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_Resource")
resourceMIQuery, err := mi.NewQuery("SELECT Name,Type,OwnerGroup,OwnerNode,Characteristics,DeadlockTimeout,EmbeddedFailureAction,Flags,IsAlivePollInterval,LooksAlivePollInterval,MonitorProcessId,PendingTimeout,ResourceClass,RestartAction,RestartDelay,RestartPeriod,RestartThreshold,RetryPeriodOnFailure,State,Subclass FROM MSCluster_Resource")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}

View File

@@ -66,7 +66,7 @@ type msClusterResourceGroup struct {
}
func (c *Collector) buildResourceGroup() error {
resourceGroupMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_ResourceGroup")
resourceGroupMIQuery, err := mi.NewQuery("SELECT AutoFailbackType,Characteristics,ColdStartSetting,DefaultOwner,FailbackWindowEnd,FailbackWindowStart,FailoverPeriod,FailoverThreshold,Flags,GroupType,OwnerNode,Priority,ResiliencyPeriod,State FROM MSCluster_ResourceGroup")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}

View File

@@ -74,13 +74,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
}
c.bytesInJournalQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_in_journal_queue"),
"Size of queue journal in bytes",
@@ -106,6 +99,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
}
return nil
}
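Note: like the memory hunk earlier, this moves the pdh.NewCollector call from the top of Build to the end, after every prometheus.NewDesc. The descriptors are plain value construction and cannot fail, so the reordering — presumably — leaves Build with a single fallible step in one predictable place. A hedged sketch of the resulting shape, with stand-in types instead of the real pdh and prometheus packages:

package main

import (
	"errors"
	"fmt"
)

type desc struct{ name string }

type perfCollector struct{}

// newPerfCollector stands in for pdh.NewCollector; it is the only step
// in Build that can fail.
func newPerfCollector(object string) (*perfCollector, error) {
	if object == "" {
		return nil, errors.New("no such counter object")
	}

	return &perfCollector{}, nil
}

type collector struct {
	bytesInJournalQueue *desc
	perfData            *perfCollector
}

// Build mirrors the reordered MSMQ Build: descriptors first (infallible),
// the fallible perf-data collector last.
func (c *collector) Build() error {
	c.bytesInJournalQueue = &desc{name: "windows_msmq_bytes_in_journal_queue"}

	var err error

	c.perfData, err = newPerfCollector("MSMQ Queue")
	if err != nil {
		return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
	}

	return nil
}

func main() {
	c := &collector{}
	fmt.Println(c.Build()) // <nil>
}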

View File

@@ -333,7 +333,7 @@ func (c *Collector) getMSSQLInstances() ([]mssqlInstance, error) {
return nil, fmt.Errorf("couldn't get instance info: %w", err)
}
instance, err := newMssqlInstance(instanceVersion)
instance, err := newMssqlInstance(instanceName, instanceVersion)
if err != nil {
return nil, err
}
@@ -348,14 +348,14 @@ func (c *Collector) getMSSQLInstances() ([]mssqlInstance, error) {
// mssqlGetPerfObjectName returns the name of the Windows Performance
// Counter object for the given SQL instance and Collector.
func (c *Collector) mssqlGetPerfObjectName(sqlInstance string, collector string) string {
func (c *Collector) mssqlGetPerfObjectName(sqlInstance mssqlInstance, collector string) string {
sb := strings.Builder{}
if sqlInstance == "MSSQLSERVER" {
if sqlInstance.isFirstInstance {
sb.WriteString("SQLServer:")
} else {
sb.WriteString("MSSQL$")
sb.WriteString(sqlInstance)
sb.WriteString(sqlInstance.name)
sb.WriteString(":")
}
@@ -369,8 +369,8 @@ func (c *Collector) mssqlGetPerfObjectName(sqlInstance string, collector string)
func (c *Collector) collect(
ch chan<- prometheus.Metric,
collector string,
perfDataCollectors map[string]*pdh.Collector,
collectFn func(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error,
perfDataCollectors map[mssqlInstance]*pdh.Collector,
collectFn func(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error,
) error {
errs := make([]error, 0, len(perfDataCollectors))
@@ -386,11 +386,11 @@ func (c *Collector) collect(
errs = append(errs, err)
success = 0.0
c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance, duration),
c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance.name, duration),
slog.Any("err", err),
)
} else {
c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance, duration))
c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance.name, duration))
}
if collector == "" {
@@ -401,13 +401,13 @@ func (c *Collector) collect(
c.mssqlScrapeDurationDesc,
prometheus.GaugeValue,
duration.Seconds(),
collector, sqlInstance,
collector, sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.mssqlScrapeSuccessDesc,
prometheus.GaugeValue,
success,
collector, sqlInstance,
collector, sqlInstance.name,
)
}
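Note: threading the whole mssqlInstance through instead of its bare name lets mssqlGetPerfObjectName branch on isFirstInstance rather than comparing against the literal "MSSQLSERVER", so — presumably — a first instance with a non-default name still maps to the "SQLServer:" counter namespace. A sketch of the resulting naming rule, using a reduced copy of the instance type:

package main

import (
	"fmt"
	"strings"
)

// mssqlInstance is a reduced copy of the exporter's type: the instance name
// plus whether it is the first (default-perf-namespace) instance.
type mssqlInstance struct {
	name            string
	isFirstInstance bool
}

// perfObjectName mirrors mssqlGetPerfObjectName: the first instance uses the
// "SQLServer:" namespace, every other instance uses "MSSQL$<name>:".
func perfObjectName(inst mssqlInstance, collector string) string {
	sb := strings.Builder{}

	if inst.isFirstInstance {
		sb.WriteString("SQLServer:")
	} else {
		sb.WriteString("MSSQL$")
		sb.WriteString(inst.name)
		sb.WriteString(":")
	}

	sb.WriteString(collector)

	return sb.String()
}

func main() {
	fmt.Println(perfObjectName(mssqlInstance{name: "PROD", isFirstInstance: true}, "Buffer Manager"))  // SQLServer:Buffer Manager
	fmt.Println(perfObjectName(mssqlInstance{name: "DEV", isFirstInstance: false}, "Buffer Manager")) // MSSQL$DEV:Buffer Manager
}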

View File

@@ -25,7 +25,7 @@ import (
)
type collectorAccessMethods struct {
accessMethodsPerfDataCollectors map[string]*pdh.Collector
accessMethodsPerfDataCollectors map[mssqlInstance]*pdh.Collector
accessMethodsPerfDataObject []perfDataCounterValuesAccessMethods
accessMethodsAUcleanupbatches *prometheus.Desc
@@ -124,11 +124,11 @@ type perfDataCounterValuesAccessMethods struct {
func (c *Collector) buildAccessMethods() error {
var err error
c.accessMethodsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.accessMethodsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.accessMethodsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Access Methods"), nil)
c.accessMethodsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Access Methods"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create AccessMethods collector for instance %s: %w", sqlInstance.name, err))
}
@@ -407,7 +407,7 @@ func (c *Collector) collectAccessMethods(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorAccessMethods, c.accessMethodsPerfDataCollectors, c.collectAccessMethodsInstance)
}
func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.accessMethodsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"), err)
@@ -417,308 +417,308 @@ func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sq
c.accessMethodsAUcleanupbatches,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupbatchesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsAUcleanups,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsByReferenceLobCreateCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobCreateCount,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsByReferenceLobUseCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobUseCount,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountLobReadahead,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsCountLobReadahead,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountPullInRow,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsCountPullInRow,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountPushOffRow,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsCountPushOffRow,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDeferreddroppedAUs,
prometheus.GaugeValue,
c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedAUs,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDeferredDroppedrowsets,
prometheus.GaugeValue,
c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedRowsets,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDroppedrowsetcleanups,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetCleanupsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDroppedrowsetsskipped,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetsSkippedPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsExtentDeallocations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsExtentDeallocationsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsExtentsAllocated,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsExtentsAllocatedPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedAUcleanupbatches,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFailedAUCleanupBatchesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedleafpagecookie,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFailedLeafPageCookie,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedtreepagecookie,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFailedTreePageCookie,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsForwardedRecords,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsForwardedRecordsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFreeSpacePageFetches,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpacePageFetchesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFreeSpaceScans,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpaceScansPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFullScans,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFullScansPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsIndexSearches,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsIndexSearchesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsInSysXactwaits,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsInSysXactWaitsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobHandleCreateCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleCreateCount,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobHandleDestroyCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleDestroyCount,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderCreateCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderCreateCount,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderDestroyCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderDestroyCount,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderTruncationCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderTruncationCount,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsMixedPageAllocations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsMixedPageAllocationsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageCompressionAttempts,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPageCompressionAttemptsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageDeallocations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPageDeallocationsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPagesAllocated,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPagesAllocatedPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPagesCompressed,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPagesCompressedPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageSplits,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPageSplitsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsProbeScans,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsProbeScansPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsRangeScans,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsRangeScansPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsScanPointRevalidations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsScanPointRevalidationsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsSkippedGhostedRecords,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsSkippedGhostedRecordsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsTableLockEscalations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsTableLockEscalationsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsUsedleafpagecookie,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsUsedLeafPageCookie,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsUsedtreepagecookie,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsUsedTreePageCookie,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorkfilesCreated,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsWorkfilesCreatedPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesCreated,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesCreatedPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesFromCacheHits,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatio,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesFromCacheLookups,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatioBase,
sqlInstance,
sqlInstance.name,
)
return nil
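Note: switching accessMethodsPerfDataCollectors (and the sibling maps below) from map[string]*pdh.Collector to map[mssqlInstance]*pdh.Collector works because a Go struct whose fields are all comparable is itself comparable and can key a map directly; the per-instance collect functions then read sqlInstance.name only where a label value is needed. A minimal illustration — the struct fields are a guess at the shape, not the exporter's full type:

package main

import "fmt"

// mssqlInstance sketches the map key: all fields are comparable,
// so the struct itself can be a map key.
type mssqlInstance struct {
	name            string
	isFirstInstance bool
}

func main() {
	collectors := make(map[mssqlInstance]string, 2)

	prod := mssqlInstance{name: "MSSQLSERVER", isFirstInstance: true}
	dev := mssqlInstance{name: "DEV"}

	collectors[prod] = "collector-for-prod"
	collectors[dev] = "collector-for-dev"

	// Iteration hands back the full key, so callers keep both the name
	// (for metric labels) and the flags (for naming decisions).
	for inst, c := range collectors {
		fmt.Printf("%s (first=%t): %s\n", inst.name, inst.isFirstInstance, c)
	}
}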

View File

@@ -26,7 +26,7 @@ import (
)
type collectorAvailabilityReplica struct {
availabilityReplicaPerfDataCollectors map[string]*pdh.Collector
availabilityReplicaPerfDataCollectors map[mssqlInstance]*pdh.Collector
availabilityReplicaPerfDataObject []perfDataCounterValuesAvailabilityReplica
availReplicaBytesReceivedFromReplica *prometheus.Desc
@@ -57,11 +57,11 @@ type perfDataCounterValuesAvailabilityReplica struct {
func (c *Collector) buildAvailabilityReplica() error {
var err error
c.availabilityReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.availabilityReplicaPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.availabilityReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Availability Replica"), pdh.InstancesAll)
c.availabilityReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance.name, err))
}
@@ -130,7 +130,7 @@ func (c *Collector) collectAvailabilityReplica(ch chan<- prometheus.Metric) erro
return c.collect(ch, subCollectorAvailabilityReplica, c.availabilityReplicaPerfDataCollectors, c.collectAvailabilityReplicaInstance)
}
func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.availabilityReplicaPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), err)
@@ -141,63 +141,63 @@ func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metr
c.availReplicaBytesReceivedFromReplica,
prometheus.CounterValue,
data.AvailReplicaBytesReceivedFromReplicaPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesSentToReplica,
prometheus.CounterValue,
data.AvailReplicaBytesSentToReplicaPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesSentToTransport,
prometheus.CounterValue,
data.AvailReplicaBytesSentToTransportPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaFlowControl,
prometheus.CounterValue,
data.AvailReplicaFlowControlPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaFlowControlTimeMS,
prometheus.CounterValue,
utils.MilliSecToSec(data.AvailReplicaFlowControlTimeMSPerSec),
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaReceivesFromReplica,
prometheus.CounterValue,
data.AvailReplicaReceivesFromReplicaPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaResentMessages,
prometheus.CounterValue,
data.AvailReplicaResentMessagesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaSendsToReplica,
prometheus.CounterValue,
data.AvailReplicaSendsToReplicaPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaSendsToTransport,
prometheus.CounterValue,
data.AvailReplicaSendsToTransportPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
}

View File

@@ -25,7 +25,7 @@ import (
)
type collectorBufferManager struct {
bufManPerfDataCollectors map[string]*pdh.Collector
bufManPerfDataCollectors map[mssqlInstance]*pdh.Collector
bufManPerfDataObject []perfDataCounterValuesBufMan
bufManBackgroundwriterpages *prometheus.Desc
@@ -82,11 +82,11 @@ type perfDataCounterValuesBufMan struct {
func (c *Collector) buildBufferManager() error {
var err error
c.bufManPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.bufManPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.bufManPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesBufMan](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Buffer Manager"), nil)
c.bufManPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesBufMan](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Buffer Manager collector for instance %s: %w", sqlInstance.name, err))
}
@@ -238,7 +238,7 @@ func (c *Collector) collectBufferManager(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorBufferManager, c.bufManPerfDataCollectors, c.collectBufferManagerInstance)
}
func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.bufManPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), err)
@@ -249,161 +249,161 @@ func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sq
c.bufManBackgroundwriterpages,
prometheus.CounterValue,
data.BufManBackgroundWriterPagesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManBuffercachehits,
prometheus.GaugeValue,
data.BufManBufferCacheHitRatio,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManBuffercachelookups,
prometheus.GaugeValue,
data.BufManBufferCacheHitRatioBase,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManCheckpointpages,
prometheus.CounterValue,
data.BufManCheckpointPagesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManDatabasepages,
prometheus.GaugeValue,
data.BufManDatabasePages,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionallocatedpages,
prometheus.GaugeValue,
data.BufManExtensionAllocatedPages,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionfreepages,
prometheus.GaugeValue,
data.BufManExtensionFreePages,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensioninuseaspercentage,
prometheus.GaugeValue,
data.BufManExtensionInUseAsPercentage,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionoutstandingIOcounter,
prometheus.GaugeValue,
data.BufManExtensionOutstandingIOCounter,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpageevictions,
prometheus.CounterValue,
data.BufManExtensionPageEvictionsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpagereads,
prometheus.CounterValue,
data.BufManExtensionPageReadsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpageunreferencedtime,
prometheus.GaugeValue,
data.BufManExtensionPageUnreferencedTime,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpagewrites,
prometheus.CounterValue,
data.BufManExtensionPageWritesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManFreeliststalls,
prometheus.CounterValue,
data.BufManFreeListStallsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManIntegralControllerSlope,
prometheus.GaugeValue,
data.BufManIntegralControllerSlope,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManLazywrites,
prometheus.CounterValue,
data.BufManLazyWritesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagelifeexpectancy,
prometheus.GaugeValue,
data.BufManPageLifeExpectancy,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagelookups,
prometheus.CounterValue,
data.BufManPageLookupsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagereads,
prometheus.CounterValue,
data.BufManPageReadsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagewrites,
prometheus.CounterValue,
data.BufManPageWritesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManReadaheadpages,
prometheus.CounterValue,
data.BufManReadaheadPagesPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManReadaheadtime,
prometheus.CounterValue,
data.BufManReadaheadTimePerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.bufManTargetpages,
prometheus.GaugeValue,
data.BufManTargetPages,
sqlInstance,
sqlInstance.name,
)
}

View File

@@ -25,8 +25,8 @@ import (
)
type collectorDatabases struct {
databasesPerfDataCollectors map[string]*pdh.Collector
databasesPerfDataCollectors2019 map[string]*pdh.Collector
databasesPerfDataCollectors map[mssqlInstance]*pdh.Collector
databasesPerfDataCollectors2019 map[mssqlInstance]*pdh.Collector
databasesPerfDataObject []perfDataCounterValuesDatabases
databasesPerfDataObject2019 []perfDataCounterValuesDatabases2019
@@ -141,18 +141,18 @@ type perfDataCounterValuesDatabases2019 struct {
func (c *Collector) buildDatabases() error {
var err error
c.databasesPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.databasesPerfDataCollectors2019 = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.databasesPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
c.databasesPerfDataCollectors2019 = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.databasesPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll)
c.databasesPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance.name, err))
}
if sqlInstance.isVersionGreaterOrEqualThan(serverVersion2019) {
c.databasesPerfDataCollectors2019[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll)
c.databasesPerfDataCollectors2019[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Databases 2019 collector for instance %s: %w", sqlInstance.name, err))
}
@@ -458,7 +458,7 @@ func (c *Collector) collectDatabases(ch chan<- prometheus.Metric) error {
)
}
func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.databasesPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)
@@ -469,336 +469,336 @@ func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlIns
c.databasesActiveTransactions,
prometheus.GaugeValue,
data.DatabasesActiveTransactions,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesBackupPerRestoreThroughput,
prometheus.CounterValue,
data.DatabasesBackupPerRestoreThroughputPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesBulkCopyRows,
prometheus.CounterValue,
data.DatabasesBulkCopyRowsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesBulkCopyThroughput,
prometheus.CounterValue,
data.DatabasesBulkCopyThroughputPerSec*1024,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesCommitTableEntries,
prometheus.GaugeValue,
data.DatabasesCommitTableEntries,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesDataFilesSizeKB,
prometheus.GaugeValue,
data.DatabasesDataFilesSizeKB*1024,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesDBCCLogicalScanBytes,
prometheus.CounterValue,
data.DatabasesDBCCLogicalScanBytesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesGroupCommitTime,
prometheus.CounterValue,
data.DatabasesGroupCommitTimePerSec/1000000.0,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogBytesFlushed,
prometheus.CounterValue,
data.DatabasesLogBytesFlushedPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogCacheHits,
prometheus.GaugeValue,
data.DatabasesLogCacheHitRatio,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogCacheLookups,
prometheus.GaugeValue,
data.DatabasesLogCacheHitRatioBase,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogCacheReads,
prometheus.CounterValue,
data.DatabasesLogCacheReadsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFilesSizeKB,
prometheus.GaugeValue,
data.DatabasesLogFilesSizeKB*1024,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFilesUsedSizeKB,
prometheus.GaugeValue,
data.DatabasesLogFilesUsedSizeKB*1024,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushes,
prometheus.CounterValue,
data.DatabasesLogFlushesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushWaits,
prometheus.CounterValue,
data.DatabasesLogFlushWaitsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushWaitTime,
prometheus.GaugeValue,
data.DatabasesLogFlushWaitTime/1000.0,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushWriteTimeMS,
prometheus.GaugeValue,
data.DatabasesLogFlushWriteTimeMS/1000.0,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogGrowths,
prometheus.GaugeValue,
data.DatabasesLogGrowths,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolCacheMisses,
prometheus.CounterValue,
data.DatabasesLogPoolCacheMissesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolDiskReads,
prometheus.CounterValue,
data.DatabasesLogPoolDiskReadsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolHashDeletes,
prometheus.CounterValue,
data.DatabasesLogPoolHashDeletesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolHashInserts,
prometheus.CounterValue,
data.DatabasesLogPoolHashInsertsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolInvalidHashEntry,
prometheus.CounterValue,
data.DatabasesLogPoolInvalidHashEntryPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolLogScanPushes,
prometheus.CounterValue,
data.DatabasesLogPoolLogScanPushesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolLogWriterPushes,
prometheus.CounterValue,
data.DatabasesLogPoolLogWriterPushesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolPushEmptyFreePool,
prometheus.CounterValue,
data.DatabasesLogPoolPushEmptyFreePoolPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolPushLowMemory,
prometheus.CounterValue,
data.DatabasesLogPoolPushLowMemoryPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolPushNoFreeBuffer,
prometheus.CounterValue,
data.DatabasesLogPoolPushNoFreeBufferPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolReqBehindTrunc,
prometheus.CounterValue,
data.DatabasesLogPoolReqBehindTruncPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolRequestsOldVLF,
prometheus.CounterValue,
data.DatabasesLogPoolRequestsOldVLFPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolRequests,
prometheus.CounterValue,
data.DatabasesLogPoolRequestsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolTotalActiveLogSize,
prometheus.GaugeValue,
data.DatabasesLogPoolTotalActiveLogSize,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolTotalSharedPoolSize,
prometheus.GaugeValue,
data.DatabasesLogPoolTotalSharedPoolSize,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogShrinks,
prometheus.GaugeValue,
data.DatabasesLogShrinks,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogTruncations,
prometheus.GaugeValue,
data.DatabasesLogTruncations,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesPercentLogUsed,
prometheus.GaugeValue,
data.DatabasesPercentLogUsed,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesReplPendingXacts,
prometheus.GaugeValue,
data.DatabasesReplPendingXacts,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesReplTransRate,
prometheus.CounterValue,
data.DatabasesReplTransRate,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesShrinkDataMovementBytes,
prometheus.CounterValue,
data.DatabasesShrinkDataMovementBytesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesTrackedTransactions,
prometheus.CounterValue,
data.DatabasesTrackedTransactionsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesTransactions,
prometheus.CounterValue,
data.DatabasesTransactionsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesWriteTransactions,
prometheus.CounterValue,
data.DatabasesWriteTransactionsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPControllerDLCLatencyPerFetch,
prometheus.GaugeValue,
data.DatabasesXTPControllerDLCLatencyPerFetch,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPControllerDLCPeakLatency,
prometheus.GaugeValue,
data.DatabasesXTPControllerDLCPeakLatency*1000000.0,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPControllerLogProcessed,
prometheus.CounterValue,
data.DatabasesXTPControllerLogProcessedPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPMemoryUsedKB,
prometheus.GaugeValue,
data.DatabasesXTPMemoryUsedKB*1024,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
}
return nil
}
func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.databasesPerfDataObject2019)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)
@@ -809,7 +809,7 @@ func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sq
c.databasesActiveParallelRedoThreads,
prometheus.GaugeValue,
data.DatabasesActiveParallelRedoThreads,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
}
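Note: buildDatabases keeps two collector maps because some Databases counters only exist from SQL Server 2019 onward: every instance gets the base collector, and instances passing isVersionGreaterOrEqualThan(serverVersion2019) additionally get the perfDataCounterValuesDatabases2019 one. A reduced sketch of the split — the version constant and comparison helper are stand-ins (SQL Server 2019 reports major version 15), not the exporter's real definitions:

package main

import "fmt"

type mssqlInstance struct {
	name         string
	majorVersion int // stand-in for the parsed product version
}

// serverVersion2019 approximates the exporter's constant of the same name.
const serverVersion2019 = 15

func (m mssqlInstance) isVersionGreaterOrEqualThan(v int) bool {
	return m.majorVersion >= v
}

func main() {
	instances := []mssqlInstance{
		{name: "OLD", majorVersion: 14}, // SQL Server 2017
		{name: "NEW", majorVersion: 15}, // SQL Server 2019
	}

	base := make(map[mssqlInstance]string, len(instances))
	v2019 := make(map[mssqlInstance]string, len(instances))

	for _, inst := range instances {
		base[inst] = "Databases collector"
		// Only 2019+ instances expose the extra counters, so only they
		// get the second collector.
		if inst.isVersionGreaterOrEqualThan(serverVersion2019) {
			v2019[inst] = "Databases 2019 collector"
		}
	}

	fmt.Println(len(base), len(v2019)) // 2 1
}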

View File

@@ -25,7 +25,7 @@ import (
)
type collectorDatabaseReplica struct {
dbReplicaPerfDataCollectors map[string]*pdh.Collector
dbReplicaPerfDataCollectors map[mssqlInstance]*pdh.Collector
dbReplicaPerfDataObject []perfDataCounterValuesDBReplica
dbReplicaDatabaseFlowControlDelay *prometheus.Desc
@@ -86,11 +86,11 @@ type perfDataCounterValuesDBReplica struct {
func (c *Collector) buildDatabaseReplica() error {
var err error
c.dbReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.dbReplicaPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.dbReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDBReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Database Replica"), pdh.InstancesAll)
c.dbReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDBReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance.name, err))
}
@@ -249,7 +249,7 @@ func (c *Collector) collectDatabaseReplica(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorDatabaseReplica, c.dbReplicaPerfDataCollectors, c.collectDatabaseReplicaInstance)
}
func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.dbReplicaPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), err)
@@ -260,168 +260,168 @@ func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric,
c.dbReplicaDatabaseFlowControlDelay,
prometheus.GaugeValue,
data.DbReplicaDatabaseFlowControlDelay,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaDatabaseFlowControls,
prometheus.CounterValue,
data.DbReplicaDatabaseFlowControlsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaFileBytesReceived,
prometheus.CounterValue,
data.DbReplicaFileBytesReceivedPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaGroupCommits,
prometheus.CounterValue,
data.DbReplicaGroupCommitsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaGroupCommitTime,
prometheus.GaugeValue,
data.DbReplicaGroupCommitTime,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogApplyPendingQueue,
prometheus.GaugeValue,
data.DbReplicaLogApplyPendingQueue,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogApplyReadyQueue,
prometheus.GaugeValue,
data.DbReplicaLogApplyReadyQueue,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesCompressed,
prometheus.CounterValue,
data.DbReplicaLogBytesCompressedPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesDecompressed,
prometheus.CounterValue,
data.DbReplicaLogBytesDecompressedPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesReceived,
prometheus.CounterValue,
data.DbReplicaLogBytesReceivedPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressionCachehits,
prometheus.CounterValue,
data.DbReplicaLogCompressionCacheHitsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressionCachemisses,
prometheus.CounterValue,
data.DbReplicaLogCompressionCacheMissesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressions,
prometheus.CounterValue,
data.DbReplicaLogCompressionsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogDecompressions,
prometheus.CounterValue,
data.DbReplicaLogDecompressionsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogremainingforundo,
prometheus.GaugeValue,
data.DbReplicaLogRemainingForUndo,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogSendQueue,
prometheus.GaugeValue,
data.DbReplicaLogSendQueue,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaMirroredWritetransactions,
prometheus.CounterValue,
data.DbReplicaMirroredWriteTransactionsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRecoveryQueue,
prometheus.GaugeValue,
data.DbReplicaRecoveryQueue,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoblocked,
prometheus.CounterValue,
data.DbReplicaRedoBlockedPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoBytesRemaining,
prometheus.GaugeValue,
data.DbReplicaRedoBytesRemaining,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoneBytes,
prometheus.CounterValue,
data.DbReplicaRedoneBytesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedones,
prometheus.CounterValue,
data.DbReplicaRedonesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaTotalLogrequiringundo,
prometheus.GaugeValue,
data.DbReplicaTotalLogRequiringUndo,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaTransactionDelay,
prometheus.GaugeValue,
data.DbReplicaTransactionDelay/1000.0,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
}

View File

@@ -25,7 +25,7 @@ import (
)
type collectorGeneralStatistics struct {
genStatsPerfDataCollectors map[string]*pdh.Collector
genStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector
genStatsPerfDataObject []perfDataCounterValuesGenStats
genStatsActiveTempTables *prometheus.Desc
@@ -84,11 +84,11 @@ type perfDataCounterValuesGenStats struct {
func (c *Collector) buildGeneralStatistics() error {
var err error
c.genStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.genStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.genStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesGenStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "General Statistics"), nil)
c.genStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesGenStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create General Statistics collector for instance %s: %w", sqlInstance.name, err))
}
@@ -247,7 +247,7 @@ func (c *Collector) collectGeneralStatistics(ch chan<- prometheus.Metric) error
return c.collect(ch, subCollectorGeneralStatistics, c.genStatsPerfDataCollectors, c.collectGeneralStatisticsInstance)
}
func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.genStatsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), err)
@@ -257,168 +257,168 @@ func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric
c.genStatsActiveTempTables,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsActiveTempTables,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsConnectionReset,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsConnectionResetPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsEventNotificationsDelayedDrop,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsEventNotificationsDelayedDrop,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsHTTPAuthenticatedRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsHTTPAuthenticatedRequests,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsLogicalConnections,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsLogicalConnections,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsLogins,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsLoginsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsLogouts,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsLogoutsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsMarsDeadlocks,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsMarsDeadlocks,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsNonAtomicYieldRate,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsNonatomicYieldRate,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsProcessesBlocked,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsProcessesBlocked,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPEmptyRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPEmptyRequests,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPMethodInvocations,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPMethodInvocations,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSessionInitiateRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPSessionInitiateRequests,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSessionTerminateRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPSessionTerminateRequests,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSQLRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPSQLRequests,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPWSDLRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPWSDLRequests,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSQLTraceIOProviderLockWaits,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSQLTraceIOProviderLockWaits,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempDBRecoveryUnitID,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTempdbRecoveryUnitID,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempDBrowSetID,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTempdbRowsetID,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempTablesCreationRate,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsTempTablesCreationRate,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempTablesForDestruction,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTempTablesForDestruction,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTraceEventNotificationQueue,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTraceEventNotificationQueue,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTransactions,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTransactions,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsUserConnections,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsUserConnections,
sqlInstance,
sqlInstance.name,
)
return nil

View File

@@ -25,7 +25,7 @@ import (
)
type collectorLocks struct {
locksPerfDataCollectors map[string]*pdh.Collector
locksPerfDataCollectors map[mssqlInstance]*pdh.Collector
locksPerfDataObject []perfDataCounterValuesLocks
// Win32_PerfRawData_{instance}_SQLServerLocks
@@ -55,11 +55,11 @@ type perfDataCounterValuesLocks struct {
func (c *Collector) buildLocks() error {
var err error
c.locksPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.locksPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.locksPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesLocks](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Locks"), pdh.InstancesAll)
c.locksPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesLocks](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Locks"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance.name, err))
}
@@ -121,7 +121,7 @@ func (c *Collector) collectLocks(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorLocks, c.locksPerfDataCollectors, c.collectLocksInstance)
}
func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.locksPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Locks"), err)
@@ -132,56 +132,56 @@ func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstanc
c.locksWaitTime,
prometheus.GaugeValue,
data.LocksAverageWaitTimeMS/1000.0,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksCount,
prometheus.GaugeValue,
data.LocksAverageWaitTimeMSBase/1000.0,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockRequests,
prometheus.CounterValue,
data.LocksLockRequestsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockTimeouts,
prometheus.CounterValue,
data.LocksLockTimeoutsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockTimeoutstimeout0,
prometheus.CounterValue,
data.LocksLockTimeoutsTimeout0PerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockWaits,
prometheus.CounterValue,
data.LocksLockWaitsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockWaitTimeMS,
prometheus.GaugeValue,
data.LocksLockWaitTimeMS/1000.0,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.locksNumberOfDeadlocks,
prometheus.CounterValue,
data.LocksNumberOfDeadlocksPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
}

View File

@@ -25,7 +25,7 @@ import (
)
type collectorMemoryManager struct {
memMgrPerfDataCollectors map[string]*pdh.Collector
memMgrPerfDataCollectors map[mssqlInstance]*pdh.Collector
memMgrPerfDataObject []perfDataCounterValuesMemMgr
memMgrConnectionMemoryKB *prometheus.Desc
@@ -76,11 +76,11 @@ type perfDataCounterValuesMemMgr struct {
func (c *Collector) buildMemoryManager() error {
var err error
c.memMgrPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.memMgrPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.memMgrPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesMemMgr](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Memory Manager"), pdh.InstancesAll)
c.memMgrPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesMemMgr](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Memory Manager collector for instance %s: %w", sqlInstance.name, err))
}
@@ -214,7 +214,7 @@ func (c *Collector) collectMemoryManager(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorMemoryManager, c.memMgrPerfDataCollectors, c.collectMemoryManagerInstance)
}
func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.memMgrPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), err)
@@ -224,140 +224,140 @@ func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sq
c.memMgrConnectionMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrConnectionMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrDatabaseCacheMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrDatabaseCacheMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrExternalBenefitOfMemory,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrExternalBenefitOfMemory,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrFreeMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrFreeMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrGrantedWorkspaceMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrGrantedWorkspaceMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockBlocks,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockBlocks,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockBlocksAllocated,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockBlocksAllocated,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockOwnerBlocks,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocks,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockOwnerBlocksAllocated,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocksAllocated,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLogPoolMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLogPoolMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrMaximumWorkspaceMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrMaximumWorkspaceMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrMemoryGrantsOutstanding,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrMemoryGrantsOutstanding,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrMemoryGrantsPending,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrMemoryGrantsPending,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrOptimizerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrOptimizerMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrReservedServerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrReservedServerMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrSQLCacheMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrSQLCacheMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrStolenServerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrStolenServerMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrTargetServerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrTargetServerMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrTotalServerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrTotalServerMemoryKB*1024,
sqlInstance,
sqlInstance.name,
)
return nil

View File

@@ -25,7 +25,7 @@ import (
)
type collectorSQLErrors struct {
sqlErrorsPerfDataCollectors map[string]*pdh.Collector
sqlErrorsPerfDataCollectors map[mssqlInstance]*pdh.Collector
sqlErrorsPerfDataObject []perfDataCounterValuesSqlErrors
// Win32_PerfRawData_{instance}_SQLServerSQLErrors
@@ -41,11 +41,11 @@ type perfDataCounterValuesSqlErrors struct {
func (c *Collector) buildSQLErrors() error {
var err error
c.sqlErrorsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.sqlErrorsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.sqlErrorsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Errors"), pdh.InstancesAll)
c.sqlErrorsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance.name, err))
}
@@ -66,7 +66,7 @@ func (c *Collector) collectSQLErrors(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorSQLErrors, c.sqlErrorsPerfDataCollectors, c.collectSQLErrorsInstance)
}
func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.sqlErrorsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), err)
@@ -77,7 +77,7 @@ func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlIns
c.sqlErrorsTotal,
prometheus.CounterValue,
data.SqlErrorsErrorsPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
}

View File

@@ -25,7 +25,7 @@ import (
)
type collectorSQLStats struct {
sqlStatsPerfDataCollectors map[string]*pdh.Collector
sqlStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector
sqlStatsPerfDataObject []perfDataCounterValuesSqlStats
sqlStatsAutoParamAttempts *prometheus.Desc
@@ -58,11 +58,11 @@ type perfDataCounterValuesSqlStats struct {
func (c *Collector) buildSQLStats() error {
var err error
c.sqlStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.sqlStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.sqlStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Statistics"), nil)
c.sqlStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create SQL Statistics collector for instance %s: %w", sqlInstance.name, err))
}
@@ -142,7 +142,7 @@ func (c *Collector) collectSQLStats(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorSQLStats, c.sqlStatsPerfDataCollectors, c.collectSQLStatsInstance)
}
func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.sqlStatsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), err)
@@ -152,77 +152,77 @@ func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInst
c.sqlStatsAutoParamAttempts,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsAutoParamAttemptsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsBatchRequests,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsBatchRequestsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsFailedAutoParams,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsFailedAutoParamsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsForcedParameterizations,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsForcedParameterizationsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsGuidedplanexecutions,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsGuidedplanexecutionsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsMisguidedplanexecutions,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsMisguidedplanexecutionsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSafeAutoParams,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsSafeAutoParamsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLAttentionrate,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsSQLAttentionrate,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLCompilations,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsSQLCompilationsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLReCompilations,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsSQLReCompilationsPerSec,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsUnsafeAutoParams,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsUnsafeAutoParamsPerSec,
sqlInstance,
sqlInstance.name,
)
return nil

View File

@@ -25,7 +25,7 @@ import (
)
type collectorTransactions struct {
transactionsPerfDataCollectors map[string]*pdh.Collector
transactionsPerfDataCollectors map[mssqlInstance]*pdh.Collector
transactionsPerfDataObject []perfDataCounterValuesTransactions
transactionsTempDbFreeSpaceBytes *prometheus.Desc
@@ -62,11 +62,11 @@ type perfDataCounterValuesTransactions struct {
func (c *Collector) buildTransactions() error {
var err error
c.transactionsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.transactionsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.transactionsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesTransactions](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Transactions"), nil)
c.transactionsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesTransactions](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Transactions collector for instance %s: %w", sqlInstance.name, err))
}
@@ -160,7 +160,7 @@ func (c *Collector) collectTransactions(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_MSSQLSERVER_Transactions docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.transactionsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), err)
@@ -170,91 +170,91 @@ func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sql
c.transactionsTempDbFreeSpaceBytes,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsFreeSpaceintempdbKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsLongestTransactionRunningSeconds,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsLongestTransactionRunningTime,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsNonSnapshotVersionActiveTotal,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsNonSnapshotVersionTransactions,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsSnapshotActiveTotal,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsSnapshotTransactions,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsActive,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsTransactions,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsUpdateConflictsTotal,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsUpdateconflictratio,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsUpdateSnapshotActiveTotal,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsUpdateSnapshotTransactions,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionCleanupRateBytes,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsVersionCleanuprateKBPers*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionGenerationRateBytes,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsVersionGenerationrateKBPers*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreSizeBytes,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsVersionStoreSizeKB*1024,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreUnits,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcount,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreCreationUnits,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcreation,
sqlInstance,
sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreTruncationUnits,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsVersionStoreunittruncation,
sqlInstance,
sqlInstance.name,
)
return nil

View File

@@ -25,7 +25,7 @@ import (
)
type collectorWaitStats struct {
waitStatsPerfDataCollectors map[string]*pdh.Collector
waitStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector
waitStatsPerfDataObject []perfDataCounterValuesWaitStats
waitStatsLockWaits *prometheus.Desc
@@ -62,11 +62,11 @@ type perfDataCounterValuesWaitStats struct {
func (c *Collector) buildWaitStats() error {
var err error
c.waitStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.waitStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
c.waitStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesWaitStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Wait Statistics"), pdh.InstancesAll)
c.waitStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesWaitStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Wait Statistics collector for instance %s: %w", sqlInstance.name, err))
}
@@ -153,7 +153,7 @@ func (c *Collector) collectWaitStats(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorWaitStats, c.waitStatsPerfDataCollectors, c.collectWaitStatsInstance)
}
func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.waitStatsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), err)
@@ -164,84 +164,84 @@ func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlIns
c.waitStatsLockWaits,
prometheus.CounterValue,
data.WaitStatsLockWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsMemoryGrantQueueWaits,
prometheus.CounterValue,
data.WaitStatsMemoryGrantQueueWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsThreadSafeMemoryObjectsWaits,
prometheus.CounterValue,
data.WaitStatsThreadSafeMemoryObjectsWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsLogWriteWaits,
prometheus.CounterValue,
data.WaitStatsLogWriteWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsLogBufferWaits,
prometheus.CounterValue,
data.WaitStatsLogBufferWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsNetworkIOWaits,
prometheus.CounterValue,
data.WaitStatsNetworkIOWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsPageIOLatchWaits,
prometheus.CounterValue,
data.WaitStatsPageIOLatchWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsPageLatchWaits,
prometheus.CounterValue,
data.WaitStatsPageLatchWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsNonPageLatchWaits,
prometheus.CounterValue,
data.WaitStatsNonpageLatchWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsWaitForTheWorkerWaits,
prometheus.CounterValue,
data.WaitStatsWaitForTheWorkerWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsWorkspaceSynchronizationWaits,
prometheus.CounterValue,
data.WaitStatsWorkspaceSynchronizationWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsTransactionOwnershipWaits,
prometheus.CounterValue,
data.WaitStatsTransactionOwnershipWaits,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
}

View File

@@ -8,13 +8,14 @@ import (
)
type mssqlInstance struct {
name string
majorVersion mssqlServerMajorVersion
patchVersion string
edition string
name string
majorVersion mssqlServerMajorVersion
patchVersion string
edition string
isFirstInstance bool
}
func newMssqlInstance(name string) (mssqlInstance, error) {
func newMssqlInstance(key, name string) (mssqlInstance, error) {
regKey := fmt.Sprintf(`Software\Microsoft\Microsoft SQL Server\%s\Setup`, name)
k, err := registry.OpenKey(registry.LOCAL_MACHINE, regKey, registry.QUERY_VALUE)
@@ -39,10 +40,11 @@ func newMssqlInstance(name string) (mssqlInstance, error) {
_, name, _ = strings.Cut(name, ".")
return mssqlInstance{
edition: edition,
name: name,
majorVersion: newMajorVersion(patchVersion),
patchVersion: patchVersion,
edition: edition,
name: name,
majorVersion: newMajorVersion(patchVersion),
patchVersion: patchVersion,
isFirstInstance: key == "MSSQLSERVER",
}, nil
}
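The constructor change above threads the registry key name through alongside the display name, since only the key reliably identifies the default instance. A small sketch of that detection, assuming "MSSQLSERVER" is the key of the default instance as in the hunk; the version-prefixed sample name is illustrative:

package main

import (
	"fmt"
	"strings"
)

// isFirstInstance mirrors the new struct field: the registry key of the
// default instance is "MSSQLSERVER"; named instances carry their own key.
func isFirstInstance(key string) bool {
	return key == "MSSQLSERVER"
}

func main() {
	// The constructor also trims a version prefix (e.g. "MSSQL16.") from
	// the display name, as in the strings.Cut call above.
	_, name, _ := strings.Cut("MSSQL16.SQLEXPRESS", ".")

	fmt.Println(name, isFirstInstance("SQLEXPRESS")) // SQLEXPRESS false
}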

View File

@@ -33,7 +33,12 @@ import (
"golang.org/x/sys/windows"
)
const Name = "net"
const (
Name = "net"
subCollectorMetrics = "metrics"
subCollectorNicInfo = "nic_addresses"
)
type Config struct {
NicExclude *regexp.Regexp `yaml:"nic_exclude"`
@@ -46,8 +51,8 @@ var ConfigDefaults = Config{
NicExclude: types.RegExpEmpty,
NicInclude: types.RegExpAny,
CollectorsEnabled: []string{
"metrics",
"nic_addresses",
subCollectorMetrics,
subCollectorNicInfo,
},
}
@@ -157,17 +162,12 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Network Interface", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Network Interface collector: %w", err)
}
if slices.Contains(c.config.CollectorsEnabled, "addresses") {
logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.",
slog.String("collector", Name),
)
for _, collector := range c.config.CollectorsEnabled {
if !slices.Contains([]string{subCollectorMetrics, subCollectorNicInfo}, collector) {
return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
strings.Join([]string{subCollectorMetrics, subCollectorNicInfo}, ", "),
)
}
}
c.bytesReceivedTotal = prometheus.NewDesc(
@@ -261,21 +261,34 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Network Interface", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Network Interface collector: %w", err)
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorNicInfo) {
logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.",
slog.String("collector", Name),
)
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
errs := make([]error, 0)
if slices.Contains(c.config.CollectorsEnabled, "metrics") {
if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
if err := c.collect(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting metrics: %w", err))
}
}
if slices.Contains(c.config.CollectorsEnabled, "nic_addresses") {
if slices.Contains(c.config.CollectorsEnabled, subCollectorNicInfo) {
if err := c.collectNICAddresses(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting net addresses: %w", err))
}
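The net collector hunks above name the two sub-collectors as constants and reject unknown entries during Build. A compact sketch of that validation, assuming only the two constants defined in this file; the validate wrapper is hypothetical:

package main

import (
	"fmt"
	"slices"
	"strings"
)

const (
	subCollectorMetrics = "metrics"
	subCollectorNicInfo = "nic_addresses"
)

// validate rejects any configured sub-collector outside the known set,
// mirroring the loop added to Build above.
func validate(enabled []string) error {
	for _, collector := range enabled {
		if !slices.Contains([]string{subCollectorMetrics, subCollectorNicInfo}, collector) {
			return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
				strings.Join([]string{subCollectorMetrics, subCollectorNicInfo}, ", "))
		}
	}

	return nil
}

func main() {
	fmt.Println(validate([]string{"metrics", "addresses"})) // unknown sub collector: addresses ...
}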

View File

@@ -26,25 +26,25 @@ import (
func (c *Collector) buildClrExceptions() {
c.numberOfExceptionsThrown = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "exceptions_thrown_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrExceptions+"_exceptions_thrown_total"),
"Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.",
[]string{"process"},
nil,
)
c.numberOfFilters = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "exceptions_filters_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrExceptions+"_exceptions_filters_total"),
"Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled.",
[]string{"process"},
nil,
)
c.numberOfFinally = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "exceptions_finallys_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrExceptions+"_exceptions_finallys_total"),
"Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter.",
[]string{"process"},
nil,
)
c.throwToCatchDepth = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "throw_to_catch_depth_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrExceptions+"_throw_to_catch_depth_total"),
"Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception.",
[]string{"process"},
nil,
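This and the following netframework hunks prefix every metric name with a per-sub-collector constant such as collectorClrExceptions. A sketch of the resulting naming pattern; the namespace and collector name values below are illustrative stand-ins, not the exporter's actual constants:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	namespace              = "windows"      // illustrative stand-in for types.Namespace
	name                   = "netframework" // illustrative stand-in for Name
	collectorClrExceptions = "clrexceptions"
)

func main() {
	// With the prefix, the fully qualified name becomes
	// windows_netframework_clrexceptions_exceptions_thrown_total.
	desc := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, name, collectorClrExceptions+"_exceptions_thrown_total"),
		"Total number of exceptions thrown since the application started.",
		[]string{"process"},
		nil,
	)

	fmt.Println(desc)
}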

View File

@@ -26,19 +26,19 @@ import (
func (c *Collector) buildClrInterop() {
c.numberOfCCWs = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "com_callable_wrappers_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrInterop+"_com_callable_wrappers_total"),
"Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.",
[]string{"process"},
nil,
)
c.numberOfMarshalling = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "interop_marshalling_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrInterop+"_interop_marshalling_total"),
"Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started.",
[]string{"process"},
nil,
)
c.numberOfStubs = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "interop_stubs_created_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrInterop+"_interop_stubs_created_total"),
"Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call.",
[]string{"process"},
nil,

View File

@@ -26,25 +26,25 @@ import (
func (c *Collector) buildClrJIT() {
c.numberOfMethodsJitted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "jit_methods_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrJIT+"_jit_methods_total"),
"Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.",
[]string{"process"},
nil,
)
c.timeInJit = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "jit_time_percent"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrJIT+"_jit_time_percent"),
"Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled.",
[]string{"process"},
nil,
)
c.standardJitFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "jit_standard_failures_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrJIT+"_jit_standard_failures_total"),
"Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler.",
[]string{"process"},
nil,
)
c.totalNumberOfILBytesJitted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "jit_il_bytes_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrJIT+"_jit_il_bytes_total"),
"Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started",
[]string{"process"},
nil,

View File

@@ -26,55 +26,55 @@ import (
func (c *Collector) buildClrLoading() {
c.bytesInLoaderHeap = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "loader_heap_size_bytes"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_loader_heap_size_bytes"),
"Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.",
[]string{"process"},
nil,
)
c.currentAppDomains = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "appdomains_loaded_current"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_appdomains_loaded_current"),
"Displays the current number of application domains loaded in this application.",
[]string{"process"},
nil,
)
c.currentAssemblies = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "assemblies_loaded_current"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_assemblies_loaded_current"),
"Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
[]string{"process"},
nil,
)
c.currentClassesLoaded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "classes_loaded_current"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_classes_loaded_current"),
"Displays the current number of classes loaded in all assemblies.",
[]string{"process"},
nil,
)
c.totalAppDomains = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "appdomains_loaded_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_appdomains_loaded_total"),
"Displays the peak number of application domains loaded since the application started.",
[]string{"process"},
nil,
)
c.totalAppDomainsUnloaded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "appdomains_unloaded_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_appdomains_unloaded_total"),
"Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded.",
[]string{"process"},
nil,
)
c.totalAssemblies = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "assemblies_loaded_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_assemblies_loaded_total"),
"Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
[]string{"process"},
nil,
)
c.totalClassesLoaded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "classes_loaded_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_classes_loaded_total"),
"Displays the cumulative number of classes loaded in all assemblies since the application started.",
[]string{"process"},
nil,
)
c.totalNumberOfLoadFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "class_load_failures_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_class_load_failures_total"),
"Displays the peak number of classes that have failed to load since the application started.",
[]string{"process"},
nil,

View File

@@ -26,43 +26,43 @@ import (
func (c *Collector) buildClrLocksAndThreads() {
c.currentQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_queue_length"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_current_queue_length"),
"Displays the total number of threads that are currently waiting to acquire a managed lock in the application.",
[]string{"process"},
nil,
)
c.numberOfCurrentLogicalThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_logical_threads"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_current_logical_threads"),
"Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads. ",
[]string{"process"},
nil,
)
c.numberOfCurrentPhysicalThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "physical_threads_current"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_physical_threads_current"),
"Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process.",
[]string{"process"},
nil,
)
c.numberOfCurrentRecognizedThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "recognized_threads_current"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_recognized_threads_current"),
"Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
[]string{"process"},
nil,
)
c.numberOfTotalRecognizedThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "recognized_threads_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_recognized_threads_total"),
"Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
[]string{"process"},
nil,
)
c.queueLengthPeak = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "queue_length_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_queue_length_total"),
"Displays the total number of threads that waited to acquire a managed lock since the application started.",
[]string{"process"},
nil,
)
c.totalNumberOfContentions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "contentions_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_contentions_total"),
"Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully.",
[]string{"process"},
nil,

View File

@@ -26,73 +26,73 @@ import (
func (c *Collector) buildClrMemory() {
c.allocatedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "allocated_bytes_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_allocated_bytes_total"),
"Displays the total number of bytes allocated on the garbage collection heap.",
[]string{"process"},
nil,
)
c.finalizationSurvivors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "finalization_survivors"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_finalization_survivors"),
"Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized.",
[]string{"process"},
nil,
)
c.heapSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "heap_size_bytes"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_heap_size_bytes"),
"Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated.",
[]string{"process", "area"},
nil,
)
c.promotedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "promoted_bytes"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_promoted_bytes"),
"Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection.",
[]string{"process", "area"},
nil,
)
c.numberGCHandles = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "number_gc_handles"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_number_gc_handles"),
"Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment.",
[]string{"process"},
nil,
)
c.numberCollections = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "collections_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_collections_total"),
"Displays the number of times the generation objects are garbage collected since the application started.",
[]string{"process", "area"},
nil,
)
c.numberInducedGC = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "induced_gc_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_induced_gc_total"),
"Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect.",
[]string{"process"},
nil,
)
c.numberOfPinnedObjects = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "number_pinned_objects"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_number_pinned_objects"),
"Displays the number of pinned objects encountered in the last garbage collection.",
[]string{"process"},
nil,
)
c.numberOfSinkBlocksInUse = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "number_sink_blocksinuse"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_number_sink_blocksinuse"),
"Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector.",
[]string{"process"},
nil,
)
c.numberTotalCommittedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "committed_bytes"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_committed_bytes"),
"Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file.",
[]string{"process"},
nil,
)
c.numberTotalReservedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "reserved_bytes"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_reserved_bytes"),
"Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used.",
[]string{"process"},
nil,
)
c.timeInGC = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "gc_time_percent"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_gc_time_percent"),
"Displays the percentage of time that was spent performing a garbage collection in the last sample.",
[]string{"process"},
nil,

View File

@@ -26,37 +26,37 @@ import (
func (c *Collector) buildClrRemoting() {
c.channels = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "channels_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_channels_total"),
"Displays the total number of remoting channels registered across all application domains since application started.",
[]string{"process"},
nil,
)
c.contextBoundClassesLoaded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "context_bound_classes_loaded"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_context_bound_classes_loaded"),
"Displays the current number of context-bound classes that are loaded.",
[]string{"process"},
nil,
)
c.contextBoundObjects = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "context_bound_objects_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_context_bound_objects_total"),
"Displays the total number of context-bound objects allocated.",
[]string{"process"},
nil,
)
c.contextProxies = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "context_proxies_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_context_proxies_total"),
"Displays the total number of remoting proxy objects in this process since it started.",
[]string{"process"},
nil,
)
c.contexts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "contexts"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_contexts"),
"Displays the current number of remoting contexts in the application.",
[]string{"process"},
nil,
)
c.totalRemoteCalls = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "remote_calls_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_remote_calls_total"),
"Displays the total number of remote procedure calls invoked since the application started.",
[]string{"process"},
nil,

View File

@@ -26,25 +26,25 @@ import (
func (c *Collector) buildClrSecurity() {
c.numberLinkTimeChecks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "link_time_checks_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrSecurity+"_link_time_checks_total"),
"Displays the total number of link-time code access security checks since the application started.",
[]string{"process"},
nil,
)
c.timeInRTChecks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rt_checks_time_percent"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrSecurity+"_rt_checks_time_percent"),
"Displays the percentage of time spent performing runtime code access security checks in the last sample.",
[]string{"process"},
nil,
)
c.stackWalkDepth = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "stack_walk_depth"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrSecurity+"_stack_walk_depth"),
"Displays the depth of the stack during that last runtime code access security check.",
[]string{"process"},
nil,
)
c.totalRuntimeChecks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "runtime_checks_total"),
prometheus.BuildFQName(types.Namespace, Name, collectorClrSecurity+"_runtime_checks_total"),
"Displays the total number of runtime code access security checks performed since the application started.",
[]string{"process"},
nil,

View File

@@ -94,20 +94,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
errs := make([]error, 0, 2)
c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
}
c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
}
c.accessAccepts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_accepts"),
"(AccessAccepts)",
@@ -260,13 +246,27 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
errs := make([]error, 0)
c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
}
c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
}
return errors.Join(errs...)
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
errs := make([]error, 0)
if err := c.collectAccept(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting NPS accept data: %w", err))

View File

@@ -20,6 +20,7 @@ import (
"fmt"
"log/slog"
"strconv"
"strings"
"time"
"github.com/alecthomas/kingpin/v2"
@@ -52,7 +53,7 @@ type Collector struct {
processesLimit *prometheus.Desc
// users
// Deprecated: Use count(windows_logon_logon_type) instead.
// Deprecated: Use `sum(windows_terminal_services_session_info{state="active"})` instead.
users *prometheus.Desc
// physicalMemoryFreeBytes
@@ -105,7 +106,7 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger.Warn("The os collect holds a number of deprecated metrics and will be removed mid 2025. "+
logger.Warn("The os collector holds a number of deprecated metrics and will be removed mid 2025. "+
"See https://github.com/prometheus-community/windows_exporter/pull/1596 for more information.",
slog.String("collector", Name),
)
@@ -117,6 +118,11 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
version := windows.RtlGetVersion()
// Microsoft has decided to keep the major version as "10" for Windows 11, including in the product name.
if version.BuildNumber >= 22000 {
productName = strings.Replace(productName, " 10 ", " 11 ", 1)
}
c.osInformation = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
`Contains full product name & version in labels. Note that the "major_version" for Windows 11 is "10"; a build number greater than 22000 represents Windows 11.`,
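A runnable sketch of the Windows 11 naming workaround added above: the kernel keeps reporting major version 10, so the product name is patched once the build number reaches 22000. The sample product name is illustrative:

package main

import (
	"fmt"
	"strings"
)

func productNameFor(productName string, buildNumber uint32) string {
	// The build number is the reliable Windows 11 signal; the major
	// version and registry product name still say 10.
	if buildNumber >= 22000 {
		return strings.Replace(productName, " 10 ", " 11 ", 1)
	}

	return productName
}

func main() {
	fmt.Println(productNameFor("Windows 10 Pro", 22631)) // Windows 11 Pro
}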
@@ -174,7 +180,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
)
c.users = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "users"),
"Deprecated: Use `count(windows_logon_logon_type)` instead.",
"Deprecated: Use `sum(windows_terminal_services_session_info{state=\"active\"})` instead.",
nil,
nil,
)
@@ -203,7 +209,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 4)
errs := make([]error, 0)
c.collect(ch)
@@ -371,5 +377,5 @@ func (c *Collector) getWindowsVersion() (string, string, error) {
return "", "", err
}
return productName, strconv.FormatUint(revision, 10), nil
return strings.TrimSpace(productName), strconv.FormatUint(revision, 10), nil
}

View File

@@ -74,13 +74,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Paging File collector: %w", err)
}
c.pagingLimitBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "limit_bytes"),
"Number of bytes that can be stored in the operating system paging files. 0 (zero) indicates that there are no paging files",
@@ -95,6 +88,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Paging File collector: %w", err)
}
return nil
}
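The pagefile hunk above, like the physical_disk, printer, smb, smbclient, and smtp hunks that follow, moves the fallible PDH (or WMI) setup to the end of Build, after all descriptors are constructed. A minimal sketch of the reordered shape; newPerfCollector is a hypothetical stand-in for pdh.NewCollector, which is internal to the exporter:

package main

import (
	"errors"
	"fmt"
)

// newPerfCollector stands in for pdh.NewCollector, the step that can fail
// at runtime.
func newPerfCollector(object string) (string, error) {
	if object == "" {
		return "", errors.New("no such performance object")
	}

	return "pdh:" + object, nil
}

type collector struct {
	desc     string
	perfData string
}

func (c *collector) build() error {
	// Descriptors first: cheap and infallible.
	c.desc = "windows_pagefile_limit_bytes"

	// Fallible collector setup last, so every descriptor exists even if
	// this step errors out.
	var err error

	c.perfData, err = newPerfCollector("Paging File")
	if err != nil {
		return fmt.Errorf("failed to create Paging File collector: %w", err)
	}

	return nil
}

func main() {
	c := &collector{}
	fmt.Println(c.build(), c.desc)
}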

View File

@@ -127,13 +127,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
}
c.requestsQueued = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
"The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength)",
@@ -218,6 +211,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
}
return nil
}

View File

@@ -126,25 +126,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if miSession == nil {
return errors.New("miSession is nil")
}
miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQueryPrinter = miQuery
miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQueryPrinterJobs = miQuery
c.miSession = miSession
c.printerJobStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "job_status"),
"A counter of printer jobs by status",
@@ -164,6 +145,25 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
nil,
)
if miSession == nil {
return errors.New("miSession is nil")
}
miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQueryPrinter = miQuery
miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.miQueryPrinterJobs = miQuery
c.miSession = miSession
return nil
}

View File

@@ -102,18 +102,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(*slog.Logger, *mi.Session) error {
var err error
c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create RemoteFX Network collector: %w", err)
}
c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err)
}
// net
c.baseTCPRTT = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"),
@@ -238,13 +226,27 @@ func (c *Collector) Build(*slog.Logger, *mi.Session) error {
nil,
)
return nil
var err error
errs := make([]error, 0)
c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create RemoteFX Network collector: %w", err))
}
c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err))
}
return errors.Join(errs...)
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
errs := make([]error, 0)
if err := c.collectRemoteFXNetworkCount(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting RemoteFX Network metrics: %w", err))

View File

@@ -248,7 +248,7 @@ func getScheduledTasks() (ScheduledTasks, error) {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
var oleCode *ole.OleError
if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != S_FALSE {
return nil, err
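The scheduled_task hunk above switches COM initialization from multithreaded to apartment-threaded with OLE1 DDE disabled. A self-contained sketch of that pattern, assuming the github.com/go-ole/go-ole package used here; the local sFalse constant mirrors the S_FALSE check in the diff:

package main

import (
	"errors"
	"runtime"

	ole "github.com/go-ole/go-ole"
)

// sFalse (S_FALSE) means COM was already initialized on this thread;
// treat it as success.
const sFalse = 0x00000001

func main() {
	// Apartment-threaded COM state is bound to the initializing OS
	// thread, so the goroutine must stay pinned to it.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
		var oleErr *ole.OleError
		if errors.As(err, &oleErr) && oleErr.Code() != ole.S_OK && oleErr.Code() != sFalse {
			panic(err)
		}
	}
	defer ole.CoUninitialize()
}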

View File

@@ -350,7 +350,9 @@ func (c *Collector) collectService(ch chan<- prometheus.Metric, serviceName stri
logLevel := slog.LevelWarn
if errors.Is(err, windows.ERROR_ACCESS_DENIED) {
// ERROR_INVALID_PARAMETER is returned when the process is not running. This can happen
// if the service terminated after querying the service API.
if errors.Is(err, windows.ERROR_ACCESS_DENIED) || errors.Is(err, windows.ERROR_INVALID_PARAMETER) {
logLevel = slog.LevelDebug
}
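A small sketch of the log-level downgrade above: errors expected during normal churn (the service vanished between enumeration and query) drop to debug, anything else stays at warn. The levelFor helper is hypothetical:

package main

import (
	"context"
	"errors"
	"log/slog"

	"golang.org/x/sys/windows"
)

// levelFor reproduces the branch added above.
func levelFor(err error) slog.Level {
	if errors.Is(err, windows.ERROR_ACCESS_DENIED) || errors.Is(err, windows.ERROR_INVALID_PARAMETER) {
		return slog.LevelDebug
	}

	return slog.LevelWarn
}

func main() {
	err := windows.ERROR_INVALID_PARAMETER

	slog.Log(context.Background(), levelFor(err), "failed to query service",
		slog.Any("err", err),
	)
}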
@@ -366,9 +368,9 @@ func (c *Collector) collectService(ch chan<- prometheus.Metric, serviceName stri
// This is realized by asking the Service Manager directly.
func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) {
var (
bytesNeeded uint32
servicesReturned uint32
err error
additionalBytesNeeded uint32
servicesReturned uint32
err error
)
for {
@@ -381,7 +383,7 @@ func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, e
windows.SERVICE_STATE_ALL,
&c.queryAllServicesBuffer[0],
currentBufferSize,
&bytesNeeded,
&additionalBytesNeeded,
&servicesReturned,
nil,
nil,
@@ -395,11 +397,14 @@ func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, e
return nil, err
}
if bytesNeeded <= currentBufferSize {
return nil, fmt.Errorf("windows.EnumServicesStatusEx reports buffer too small (%d), but buffer is large enough (%d)", currentBufferSize, bytesNeeded)
}
/*
Unlike other Win32 API calls, additionalBytesNeeded does not return the absolute number of bytes needed,
but the additional bytes needed relative to the cbBufSize parameter.
ref:
https://stackoverflow.com/questions/14756347/when-calling-enumservicesstatusex-twice-i-still-get-eror-more-data-in-c
*/
c.queryAllServicesBuffer = make([]byte, bytesNeeded)
c.queryAllServicesBuffer = make([]byte, currentBufferSize+additionalBytesNeeded)
}
if servicesReturned == 0 {
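The retry loop fixed above grows the buffer by the reported delta instead of treating the out-value as an absolute size. A self-contained sketch of the same grow-and-retry shape; enumerate is a hypothetical stand-in for windows.EnumServicesStatusEx:

package main

import "fmt"

// enumerate mimics EnumServicesStatusEx: on failure it reports the
// ADDITIONAL bytes needed relative to the buffer it was handed, not an
// absolute size.
func enumerate(buf []byte) (additionalBytesNeeded int, ok bool) {
	const required = 4096 // pretend the full service table needs 4 KiB

	if len(buf) >= required {
		return 0, true
	}

	return required - len(buf), false
}

func main() {
	buf := make([]byte, 1024)

	for {
		additional, ok := enumerate(buf)
		if ok {
			break
		}

		// Grow by the delta, mirroring currentBufferSize+additionalBytesNeeded.
		buf = make([]byte, len(buf)+additional)
	}

	fmt.Printf("final buffer: %d bytes\n", len(buf)) // final buffer: 4096 bytes
}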
@@ -417,15 +422,6 @@ func (c *Collector) getProcessStartTime(pid uint32) (uint64, error) {
return 0, fmt.Errorf("failed to open process %w", err)
}
defer func(handle windows.Handle) {
err := windows.CloseHandle(handle)
if err != nil {
c.logger.Warn("failed to close process handle",
slog.Any("err", err),
)
}
}(handle)
var (
creation windows.Filetime
exit windows.Filetime
@@ -434,6 +430,14 @@ func (c *Collector) getProcessStartTime(pid uint32) (uint64, error) {
)
err = windows.GetProcessTimes(handle, &creation, &exit, &krn, &user)
if err := windows.CloseHandle(handle); err != nil {
c.logger.LogAttrs(context.Background(), slog.LevelWarn, "failed to close process handle",
slog.Any("err", err),
slog.Uint64("pid", uint64(pid)),
)
}
if err != nil {
return 0, fmt.Errorf("failed to get process times %w", err)
}
@@ -474,7 +478,7 @@ func (c *Collector) getServiceConfig(service *mgr.Service) (mgr.Config, error) {
*buf = make([]byte, bytesNeeded)
}
c.serviceConfigPoolBytes.Put(buf)
defer c.serviceConfigPoolBytes.Put(buf)
return mgr.Config{
BinaryPathName: windows.UTF16PtrToString(serviceConfig.BinaryPathName),
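The one-line change above turns the pool return into a defer, so the buffer is handed back to the sync.Pool only after the function is done reading from it, on every exit path. A minimal sketch of the same idea; pool contents and sizes are illustrative:

package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() any {
		buf := make([]byte, 1024)

		return &buf
	},
}

func useBuffer() {
	buf := bufPool.Get().(*[]byte)
	// Deferring the Put keeps the buffer alive for the whole function body
	// instead of handing it back while its contents are still being read.
	defer bufPool.Put(buf)

	fmt.Printf("working with %d bytes\n", len(*buf))
}

func main() {
	useBuffer()
}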

View File

@@ -76,13 +76,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)
}
c.currentOpenFileCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_shares_current_open_file_count"),
"Current total count open files on the SMB Server Share",
@@ -132,6 +125,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)
}
return nil
}

View File

@@ -91,13 +91,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create SMB Client Shares collector: %w", err)
}
// desc creates a new prometheus description
desc := func(metricName string, description string, labels []string) *prometheus.Desc {
return prometheus.NewDesc(
@@ -193,6 +186,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
[]string{"server", "share"},
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create SMB Client Shares collector: %w", err)
}
return nil
}

View File

@@ -157,13 +157,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create SMTP Server collector: %w", err)
}
logger.Info("smtp collector is in an experimental state! Metrics for this collector have not been tested.",
slog.String("collector", Name),
)
@@ -421,6 +414,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create SMTP Server collector: %w", err)
}
return nil
}

View File

@@ -18,8 +18,10 @@ package system
import (
"fmt"
"log/slog"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
@@ -37,6 +39,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
bootTimeTimestamp float64
perfDataCollector *pdh.Collector
perfDataObject []perfDataCounterValues
@@ -46,8 +50,10 @@ type Collector struct {
processes *prometheus.Desc
processesLimit *prometheus.Desc
systemCallsTotal *prometheus.Desc
bootTime *prometheus.Desc
threads *prometheus.Desc
// Deprecated: Use windows_system_boot_time_timestamp instead
bootTimeSeconds *prometheus.Desc
bootTime *prometheus.Desc
threads *prometheus.Desc
}
func New(config *Config) *Collector {
@@ -77,19 +83,18 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil)
if err != nil {
return fmt.Errorf("failed to create System collector: %w", err)
}
c.bootTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"),
prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp"),
"Unix timestamp of system boot time",
nil,
nil,
)
c.bootTimeSeconds = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"),
"Deprecated: Use windows_system_boot_time_timestamp instead",
nil,
nil,
)
c.contextSwitchesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "context_switches_total"),
"Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)",
@@ -134,6 +139,15 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
c.bootTimeTimestamp = float64(time.Now().Unix() - int64(kernel32.GetTickCount64()/1000))
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil)
if err != nil {
return fmt.Errorf("failed to create System collector: %w", err)
}
return nil
}
@@ -143,6 +157,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect System metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect System metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(
@@ -170,17 +186,24 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].SystemCallsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.bootTime,
prometheus.GaugeValue,
c.perfDataObject[0].SystemUpTime,
)
ch <- prometheus.MustNewConstMetric(
c.threads,
prometheus.GaugeValue,
c.perfDataObject[0].Threads,
)
ch <- prometheus.MustNewConstMetric(
c.bootTimeSeconds,
prometheus.GaugeValue,
c.bootTimeTimestamp,
)
ch <- prometheus.MustNewConstMetric(
c.bootTime,
prometheus.GaugeValue,
c.bootTimeTimestamp,
)
// Windows has no defined limit; it is based on available resources. This currently isn't calculated by WMI and is set to a default value.
// https://techcommunity.microsoft.com/t5/windows-blog-archive/pushing-the-limits-of-windows-processes-and-threads/ba-p/723824
// https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-operatingsystem
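
With the "System Up Time" PDH counter gone (see the struct change below), boot time is now computed once in Build: current Unix time minus the uptime GetTickCount64 reports in milliseconds. A standalone sketch of that calculation, using the same lazy-DLL pattern the diff adds to the kernel32 header package further down (assumes a 64-bit build, where the full ULONGLONG return fits in the single uintptr):

package main

import (
    "fmt"
    "time"

    "golang.org/x/sys/windows"
)

var (
    modkernel32      = windows.NewLazySystemDLL("kernel32.dll")
    procGetTickCount = modkernel32.NewProc("GetTickCount64")
)

// getTickCount64 returns milliseconds elapsed since boot.
func getTickCount64() uint64 {
    ret, _, _ := procGetTickCount.Call()
    return uint64(ret)
}

func main() {
    // Uptime in seconds subtracted from "now" gives the boot timestamp.
    bootTime := time.Now().Unix() - int64(getTickCount64()/1000)
    fmt.Println("booted at:", time.Unix(bootTime, 0).UTC())
}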

View File

@@ -20,7 +20,6 @@ type perfDataCounterValues struct {
ExceptionDispatchesPerSec float64 `perfdata:"Exception Dispatches/sec"`
ProcessorQueueLength float64 `perfdata:"Processor Queue Length"`
SystemCallsPerSec float64 `perfdata:"System Calls/sec"`
SystemUpTime float64 `perfdata:"System Up Time"`
Processes float64 `perfdata:"Processes"`
Threads float64 `perfdata:"Threads"`
}
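
For context on why deleting the field is sufficient: the perfdata struct tags appear to be what the exporter-internal pdh package walks to decide which counters to add to the query (note the pdh.NewCollector[perfDataCounterValues] calls throughout), so removing SystemUpTime drops the counter without any other code change. An illustrative tag walk; the real package does more, such as the perfdata_min_build gate used by the time collector below:

package main

import (
    "fmt"
    "reflect"
)

type perfDataCounterValues struct {
    ProcessorQueueLength float64 `perfdata:"Processor Queue Length"`
    Processes            float64 `perfdata:"Processes"`
    Threads              float64 `perfdata:"Threads"`
}

func main() {
    // Reflect over the struct tags to list the counters a query would use.
    t := reflect.TypeOf(perfDataCounterValues{})
    for i := 0; i < t.NumField(); i++ {
        fmt.Println(t.Field(i).Tag.Get("perfdata"))
    }
}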

View File

@@ -118,18 +118,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv4", nil)
if err != nil {
return fmt.Errorf("failed to create TCPv4 collector: %w", err)
}
c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv6", nil)
if err != nil {
return fmt.Errorf("failed to create TCPv6 collector: %w", err)
}
c.connectionFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_failures_total"),
"(TCP.ConnectionFailures)",
@@ -190,13 +178,25 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
[]string{"af", "state"}, nil,
)
var err error
c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv4", nil)
if err != nil {
return fmt.Errorf("failed to create TCPv4 collector: %w", err)
}
c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv6", nil)
if err != nil {
return fmt.Errorf("failed to create TCPv6 collector: %w", err)
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
errs := make([]error, 0)
if slices.Contains(c.config.CollectorsEnabled, "metrics") {
if err := c.collect(ch); err != nil {

View File

@@ -133,32 +133,8 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
if miSession == nil {
return errors.New("miSession is nil")
}
c.logger = logger.With(slog.String("collector", Name))
var err error
c.perfDataCollectorTerminalServicesSession, err = pdh.NewCollector[perfDataCounterValuesTerminalServicesSession](pdh.CounterTypeRaw, "Terminal Services Session", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Terminal Services Session collector: %w", err)
}
c.connectionBrokerEnabled = isConnectionBrokerServer(miSession)
if c.connectionBrokerEnabled {
var err error
c.perfDataCollectorBroker, err = pdh.NewCollector[perfDataCounterValuesBroker](pdh.CounterTypeRaw, "Remote Desktop Connection Broker Counterset", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err)
}
} else {
logger.Debug("host is not a connection broker skipping Connection Broker performance metrics.")
}
c.sessionInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "session_info"),
"Terminal Services sessions info",
@@ -250,18 +226,40 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
nil,
)
if miSession == nil {
return errors.New("miSession is nil")
}
var err error
c.connectionBrokerEnabled = isConnectionBrokerServer(miSession)
if c.connectionBrokerEnabled {
c.perfDataCollectorBroker, err = pdh.NewCollector[perfDataCounterValuesBroker](pdh.CounterTypeRaw, "Remote Desktop Connection Broker Counterset", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err)
}
} else {
logger.Debug("host is not a connection broker skipping Connection Broker performance metrics.")
}
c.hServer, err = wtsapi32.WTSOpenServer("")
if err != nil {
return fmt.Errorf("failed to open WTS server: %w", err)
}
c.perfDataCollectorTerminalServicesSession, err = pdh.NewCollector[perfDataCounterValuesTerminalServicesSession](pdh.CounterTypeRaw, "Terminal Services Session", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Terminal Services Session collector: %w", err)
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 3)
errs := make([]error, 0)
if err := c.collectWTSSessions(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting terminal services session infos: %w", err))
@@ -439,7 +437,7 @@ func (c *Collector) collectWTSSessions(ch chan<- prometheus.Metric) error {
for _, session := range sessions {
// only connect metrics for remote named sessions
n := strings.ReplaceAll(session.SessionName, "#", " ")
if n == "" || n == "Services" || n == "Console" {
if n == "Services" {
continue
}

View File

@@ -16,6 +16,7 @@
package textfile_test
import (
"context"
"fmt"
"io"
"log/slog"
@@ -44,7 +45,7 @@ func TestMultipleDirectories(t *testing.T) {
})
collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
require.NoError(t, collectors.Build(logger))
require.NoError(t, collectors.Build(context.Background(), logger))
metrics := make(chan prometheus.Metric)
got := ""
@@ -81,7 +82,7 @@ func TestDuplicateFileName(t *testing.T) {
})
collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
require.NoError(t, collectors.Build(logger))
require.NoError(t, collectors.Build(context.Background(), logger))
metrics := make(chan prometheus.Metric)
got := ""

View File

@@ -70,13 +70,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Thermal Zone Information", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err)
}
c.temperature = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "temperature_celsius"),
"(Temperature)",
@@ -102,6 +95,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Thermal Zone Information", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err)
}
return nil
}

View File

@@ -23,6 +23,7 @@ import (
"strings"
"time"
"github.com/Microsoft/hcsshim/osversion"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
"github.com/prometheus-community/windows_exporter/internal/mi"
@@ -58,14 +59,17 @@ type Collector struct {
perfDataCollector *pdh.Collector
perfDataObject []perfDataCounterValues
currentTime *prometheus.Desc
timezone *prometheus.Desc
clockFrequencyAdjustmentPPBTotal *prometheus.Desc
computedTimeOffset *prometheus.Desc
ntpClientTimeSourceCount *prometheus.Desc
ntpRoundTripDelay *prometheus.Desc
ntpServerIncomingRequestsTotal *prometheus.Desc
ntpServerOutgoingResponsesTotal *prometheus.Desc
ppbCounterPresent bool
currentTime *prometheus.Desc
timezone *prometheus.Desc
clockFrequencyAdjustment *prometheus.Desc
clockFrequencyAdjustmentPPB *prometheus.Desc
computedTimeOffset *prometheus.Desc
ntpClientTimeSourceCount *prometheus.Desc
ntpRoundTripDelay *prometheus.Desc
ntpServerIncomingRequestsTotal *prometheus.Desc
ntpServerOutgoingResponsesTotal *prometheus.Desc
}
func New(config *Config) *Collector {
@@ -125,12 +129,8 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
}
}
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Windows Time Service", nil)
if err != nil {
return fmt.Errorf("failed to create Windows Time Service collector: %w", err)
}
// https://github.com/prometheus-community/windows_exporter/issues/1891
c.ppbCounterPresent = osversion.Build() >= osversion.LTSC2019
c.currentTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_timestamp_seconds"),
@@ -144,9 +144,15 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
[]string{"timezone"},
nil,
)
c.clockFrequencyAdjustmentPPBTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment_ppb_total"),
"Total adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units.",
c.clockFrequencyAdjustment = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment"),
"This value reflects the adjustment made to the local system clock frequency by W32Time in nominal clock units. This counter helps visualize the finer adjustments being made by W32time to synchronize the local clock.",
nil,
nil,
)
c.clockFrequencyAdjustmentPPB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment_ppb"),
"This value reflects the adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units. 1 PPB adjustment imples the system clock was adjusted at a rate of 1 nanosecond per second. The smallest possible adjustment can vary and can be expected to be in the order of 100&apos;s of PPB. This counter helps visualize the finer actions being taken by W32time to synchronize the local clock.",
nil,
nil,
)
@@ -181,13 +187,20 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Windows Time Service", nil)
if err != nil {
return fmt.Errorf("failed to create Windows Time Service collector: %w", err)
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
errs := make([]error, 0)
if slices.Contains(c.config.CollectorsEnabled, collectorSystemTime) {
if err := c.collectTime(ch); err != nil {
@@ -232,14 +245,25 @@ func (c *Collector) collectTime(ch chan<- prometheus.Metric) error {
func (c *Collector) collectNTP(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect VM Memory metrics: %w", err)
return fmt.Errorf("failed to collect Windows Time Service metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect Windows Time Service metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(
c.clockFrequencyAdjustmentPPBTotal,
prometheus.CounterValue,
c.perfDataObject[0].ClockFrequencyAdjustmentPPBTotal,
c.clockFrequencyAdjustment,
prometheus.GaugeValue,
c.perfDataObject[0].ClockFrequencyAdjustment,
)
if c.ppbCounterPresent {
ch <- prometheus.MustNewConstMetric(
c.clockFrequencyAdjustmentPPB,
prometheus.GaugeValue,
c.perfDataObject[0].ClockFrequencyAdjustmentPPB,
)
}
ch <- prometheus.MustNewConstMetric(
c.computedTimeOffset,
prometheus.GaugeValue,

View File

@@ -16,10 +16,11 @@
package time
type perfDataCounterValues struct {
ClockFrequencyAdjustmentPPBTotal float64 `perfdata:"Clock Frequency Adjustment (ppb)"`
ComputedTimeOffset float64 `perfdata:"Computed Time Offset"`
NTPClientTimeSourceCount float64 `perfdata:"NTP Client Time Source Count"`
NTPRoundTripDelay float64 `perfdata:"NTP Roundtrip Delay"`
NTPServerIncomingRequestsTotal float64 `perfdata:"NTP Server Incoming Requests"`
NTPServerOutgoingResponsesTotal float64 `perfdata:"NTP Server Outgoing Responses"`
ClockFrequencyAdjustment float64 `perfdata:"Clock Frequency Adjustment"`
ClockFrequencyAdjustmentPPB float64 `perfdata:"Clock Frequency Adjustment (ppb)" perfdata_min_build:"17763"`
ComputedTimeOffset float64 `perfdata:"Computed Time Offset"`
NTPClientTimeSourceCount float64 `perfdata:"NTP Client Time Source Count"`
NTPRoundTripDelay float64 `perfdata:"NTP Roundtrip Delay"`
NTPServerIncomingRequestsTotal float64 `perfdata:"NTP Server Incoming Requests"`
NTPServerOutgoingResponsesTotal float64 `perfdata:"NTP Server Outgoing Responses"`
}
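
The PPB counter only exists on Windows Server 2019 / Windows 10 1809 (build 17763) and later, which is why the field above carries the perfdata_min_build:"17763" tag and the collector additionally guards the metric behind ppbCounterPresent. The build check itself is a one-liner with the hcsshim package the diff imports:

package main

import (
    "fmt"

    "github.com/Microsoft/hcsshim/osversion"
)

func main() {
    // osversion.LTSC2019 == 17763; the "Clock Frequency Adjustment (ppb)"
    // counter is only registered from this build onward.
    // https://github.com/prometheus-community/windows_exporter/issues/1891
    ppbCounterPresent := osversion.Build() >= osversion.LTSC2019
    fmt.Println("PPB counter present:", ppbCounterPresent)
}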

View File

@@ -80,18 +80,6 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv4", nil)
if err != nil {
return fmt.Errorf("failed to create UDPv4 collector: %w", err)
}
c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv6", nil)
if err != nil {
return fmt.Errorf("failed to create UDPv6 collector: %w", err)
}
c.datagramsNoPortTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "datagram_no_port_total"),
"Number of received UDP datagrams for which there was no application at the destination port",
@@ -117,6 +105,18 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
var err error
c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv4", nil)
if err != nil {
return fmt.Errorf("failed to create UDPv4 collector: %w", err)
}
c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv6", nil)
if err != nil {
return fmt.Errorf("failed to create UDPv6 collector: %w", err)
}
return nil
}

View File

@@ -37,14 +37,14 @@ import (
const Name = "update"
type Config struct {
online bool `yaml:"online"`
scrapeInterval time.Duration `yaml:"scrape_interval"`
Online bool `yaml:"online"`
ScrapeInterval time.Duration `yaml:"scrape_interval"`
}
//nolint:gochecknoglobals
var ConfigDefaults = Config{
online: false,
scrapeInterval: 6 * time.Hour,
Online: false,
ScrapeInterval: 6 * time.Hour,
}
var (
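
The rename from online/scrapeInterval to Online/ScrapeInterval is what makes those yaml tags effective: Go YAML decoders can only set exported fields and silently skip unexported ones, so the lowercase fields could never be populated from a configuration file. A minimal demonstration with gopkg.in/yaml.v3 (field names here are for illustration):

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

type Config struct {
    Online bool `yaml:"online"`
    online bool `yaml:"online_old"` // unexported: the decoder ignores it
}

func main() {
    var cfg Config
    // Only the exported field is populated.
    if err := yaml.Unmarshal([]byte("online: true\nonline_old: true"), &cfg); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", cfg) // {Online:true online:false}
}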
@@ -85,12 +85,12 @@ func NewWithFlags(app *kingpin.Application) *Collector {
app.Flag(
"collector.updates.online",
"Whether to search for updates online.",
).Default(strconv.FormatBool(ConfigDefaults.online)).BoolVar(&c.config.online)
).Default(strconv.FormatBool(ConfigDefaults.Online)).BoolVar(&c.config.Online)
app.Flag(
"collector.updates.scrape-interval",
"Define the interval of scraping Windows Update information.",
).Default(ConfigDefaults.scrapeInterval.String()).DurationVar(&c.config.scrapeInterval)
).Default(ConfigDefaults.ScrapeInterval.String()).DurationVar(&c.config.ScrapeInterval)
return c
}
@@ -109,7 +109,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
ctx, cancel := context.WithCancel(context.Background())
initErrCh := make(chan error, 1)
go c.scheduleUpdateStatus(ctx, logger, initErrCh, c.config.online)
go c.scheduleUpdateStatus(ctx, logger, initErrCh, c.config.Online)
c.ctxCancelFn = cancel
@@ -166,7 +166,7 @@ func (c *Collector) scheduleUpdateStatus(ctx context.Context, logger *slog.Logge
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
var oleCode *ole.OleError
if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != 0x00000001 {
initErrCh <- fmt.Errorf("CoInitializeEx: %w", err)
@@ -178,17 +178,17 @@ func (c *Collector) scheduleUpdateStatus(ctx context.Context, logger *slog.Logge
defer ole.CoUninitialize()
// Create a new instance of the Microsoft.Update.Session COM object
mus, err := oleutil.CreateObject("Microsoft.Update.Session")
sessionObj, err := oleutil.CreateObject("Microsoft.Update.Session")
if err != nil {
initErrCh <- fmt.Errorf("create Microsoft.Update.Session: %w", err)
return
}
defer mus.Release()
defer sessionObj.Release()
// Query the IDispatch interface of the object
musQueryInterface, err := mus.QueryInterface(ole.IID_IDispatch)
musQueryInterface, err := sessionObj.QueryInterface(ole.IID_IDispatch)
if err != nil {
initErrCh <- fmt.Errorf("IID_IDispatch: %w", err)
@@ -206,9 +206,9 @@ func (c *Collector) scheduleUpdateStatus(ctx context.Context, logger *slog.Logge
// https://learn.microsoft.com/en-us/windows/win32/api/wuapi/nf-wuapi-iupdatesession-createupdatesearcher
us, err := oleutil.CallMethod(musQueryInterface, "CreateUpdateSearcher")
defer func(hc *ole.VARIANT) {
defer func(us *ole.VARIANT) {
if us != nil {
_ = hc.Clear()
_ = us.Clear()
}
}(us)
@@ -268,7 +268,7 @@ func (c *Collector) scheduleUpdateStatus(ctx context.Context, logger *slog.Logge
c.mu.Unlock()
select {
case <-time.After(c.config.scrapeInterval):
case <-time.After(c.config.ScrapeInterval):
case <-ctx.Done():
return
}
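
Both this file and the scheduled-task service further down make the same change: COM is now initialized into a single-threaded apartment instead of COINIT_MULTITHREADED. An apartment is per-thread state, so the goroutine must stay locked to its OS thread for as long as the apartment lives. The basic shape with go-ole, matching the error handling in the hunk above (a sketch, not the exporter's exact code):

package main

import (
    "errors"
    "runtime"

    "github.com/go-ole/go-ole"
)

// withCOM runs fn on a thread with an apartment-threaded COM runtime.
func withCOM(fn func() error) error {
    // The apartment belongs to this OS thread, so pin the goroutine.
    runtime.LockOSThread()
    defer runtime.UnlockOSThread()

    if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
        var oleCode *ole.OleError
        // S_FALSE (0x00000001) just means COM was already initialized on
        // this thread; anything else is fatal.
        if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != 0x00000001 {
            return err
        }
    }
    defer ole.CoUninitialize()

    return fn()
}

func main() {
    _ = withCOM(func() error {
        // e.g. oleutil.CreateObject("Microsoft.Update.Session") would go here.
        return nil
    })
}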

View File

@@ -230,7 +230,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
errs := make([]error, 0)
if err := c.collectCpu(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting vmware cpu metrics: %w", err))

View File

@@ -38,8 +38,52 @@ type Resolver struct {
flags map[string]string
}
// NewResolver returns a Resolver structure.
func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecureSkipVerify bool) (*Resolver, error) {
// Parse parses the command line arguments and configuration files.
func Parse(app *kingpin.Application, args []string) error {
configFile := ParseConfigFile(args)
if configFile != "" {
resolver, err := NewConfigFileResolver(configFile)
if err != nil {
return fmt.Errorf("failed to load configuration file: %w", err)
}
if err = resolver.Bind(app, args); err != nil {
return fmt.Errorf("failed to bind configuration: %w", err)
}
}
if _, err := app.Parse(args); err != nil {
return fmt.Errorf("failed to parse flags: %w", err)
}
return nil
}
// ParseConfigFile manually parses the configuration file from the command line arguments.
func ParseConfigFile(args []string) string {
for i, cliFlag := range args {
if strings.HasPrefix(cliFlag, "--config.file=") {
return strings.TrimPrefix(cliFlag, "--config.file=")
}
if strings.HasPrefix(cliFlag, "-config.file=") {
return strings.TrimPrefix(cliFlag, "-config.file=")
}
if strings.HasSuffix(cliFlag, "-config.file") {
if len(os.Args) <= i+1 {
return ""
}
return os.Args[i+1]
}
}
return ""
}
// NewConfigFileResolver returns a Resolver structure.
func NewConfigFileResolver(file string) (*Resolver, error) {
flags := map[string]string{}
var (
@@ -48,14 +92,15 @@ func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecure
)
if strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://") {
logger.WarnContext(ctx, "Loading configuration file from URL is deprecated and will be removed in 0.31.0. Use a local file instead.")
//nolint:sloglint // we do not have a logger yet
slog.Warn("Loading configuration file from URL is deprecated and will be removed in 0.31.0. Use a local file instead.")
fileBytes, err = readFromURL(ctx, file, logger, insecureSkipVerify)
fileBytes, err = readFromURL(file)
if err != nil {
return nil, err
}
} else {
fileBytes, err = readFromFile(ctx, file, logger)
fileBytes, err = readFromFile(file)
if err != nil {
return nil, err
}
@@ -79,9 +124,7 @@ func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecure
return &Resolver{flags: flags}, nil
}
func readFromFile(ctx context.Context, file string, logger *slog.Logger) ([]byte, error) {
logger.InfoContext(ctx, "loading configuration file: "+file)
func readFromFile(file string) ([]byte, error) {
if _, err := os.Stat(file); err != nil {
return nil, fmt.Errorf("failed to read configuration file: %w", err)
}
@@ -94,20 +137,14 @@ func readFromFile(ctx context.Context, file string, logger *slog.Logger) ([]byte
return fileBytes, nil
}
func readFromURL(ctx context.Context, file string, logger *slog.Logger, insecureSkipVerify bool) ([]byte, error) {
logger.InfoContext(ctx, "loading configuration file from URL: "+file)
func readFromURL(file string) ([]byte, error) {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify}, //nolint:gosec
}
if insecureSkipVerify {
logger.WarnContext(ctx, "Loading configuration file with TLS verification disabled")
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec
}
client := &http.Client{Transport: tr}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, file, nil)
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, file, nil)
if err != nil {
return nil, fmt.Errorf("failed to create HTTP request: %w", err)
}
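
Taken together, the new flow is: scan the raw arguments for a config file, bind its values as flag defaults, then hand the arguments to kingpin as usual. Note the three spellings ParseConfigFile accepts: --config.file=x, -config.file=x, and a space-separated --config.file x (in that last case the committed code reads the value from os.Args rather than the args slice it was handed, which only matters when the two differ). A standalone sketch of the scan, reading consistently from args:

package main

import (
    "fmt"
    "strings"
)

// parseConfigFile mirrors the new ParseConfigFile scan (sketch only).
func parseConfigFile(args []string) string {
    for i, flag := range args {
        if strings.HasPrefix(flag, "--config.file=") {
            return strings.TrimPrefix(flag, "--config.file=")
        }
        if strings.HasPrefix(flag, "-config.file=") {
            return strings.TrimPrefix(flag, "-config.file=")
        }
        if strings.HasSuffix(flag, "-config.file") {
            if len(args) <= i+1 {
                return ""
            }
            return args[i+1]
        }
    }
    return ""
}

func main() {
    fmt.Println(parseConfigFile([]string{"--config.file=config.yaml"})) // config.yaml
    fmt.Println(parseConfigFile([]string{"--config.file", "web.yml"}))  // web.yml
}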

View File

@@ -23,10 +23,11 @@ import (
//nolint:gochecknoglobals
var (
kernel32 = windows.NewLazySystemDLL("kernel32.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procGetDynamicTimeZoneInformationSys = kernel32.NewProc("GetDynamicTimeZoneInformation")
kernelLocalFileTimeToFileTime = kernel32.NewProc("LocalFileTimeToFileTime")
procGetDynamicTimeZoneInformationSys = modkernel32.NewProc("GetDynamicTimeZoneInformation")
procKernelLocalFileTimeToFileTime = modkernel32.NewProc("LocalFileTimeToFileTime")
procGetTickCount = modkernel32.NewProc("GetTickCount64")
)
// SYSTEMTIME contains a date and time.
@@ -70,9 +71,15 @@ func GetDynamicTimeZoneInformation() (DynamicTimezoneInformation, error) {
}
func LocalFileTimeToFileTime(localFileTime, utcFileTime *windows.Filetime) uint32 {
ret, _, _ := kernelLocalFileTimeToFileTime.Call(
ret, _, _ := procKernelLocalFileTimeToFileTime.Call(
uintptr(unsafe.Pointer(localFileTime)),
uintptr(unsafe.Pointer(utcFileTime)))
return uint32(ret)
}
func GetTickCount64() uint64 {
ret, _, _ := procGetTickCount.Call()
return uint64(ret)
}

View File

@@ -40,7 +40,7 @@ func (s *ScheduleService) Connect() error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
var oleCode *ole.OleError
if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != 0x00000001 {
return err

View File

@@ -22,7 +22,6 @@ import (
"strconv"
"time"
"github.com/google/uuid"
"github.com/prometheus-community/windows_exporter/pkg/collector"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
@@ -82,8 +81,7 @@ func New(logger *slog.Logger, metricCollectors *collector.Collection, options *O
func (c *MetricsHTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
logger := c.logger.With(
slog.Any("remote", r.RemoteAddr),
slog.Any("correlation_id", uuid.New().String()),
slog.String("remote", r.RemoteAddr),
)
scrapeTimeout := c.getScrapeTimeout(logger, r)
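
A small but deliberate detail in this hunk: r.RemoteAddr is a plain string, so slog.String builds the attribute directly, whereas slog.Any routes through slog's generic value inspection; dropping the per-request correlation_id also removes a UUID allocation per scrape. Minimal usage for comparison:

package main

import (
    "log/slog"
    "os"
)

func main() {
    logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

    // slog.String is the typed constructor for string attributes;
    // slog.Any would work too, but goes through value inspection first.
    logger.Info("handling scrape", slog.String("remote", "127.0.0.1:52114"))
}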

Some files were not shown because too many files have changed in this diff.