mirror of https://github.com/prometheus-community/windows_exporter.git
synced 2026-02-08 22:16:38 +00:00

Compare commits (65 commits)
| SHA1 |
|------|
| 66d207b0e2 |
| 15156ce444 |
| 716362d2ee |
| 1f0880b998 |
| 0f7f8f2583 |
| 7fa029a403 |
| fb9f1fe141 |
| bf281d9e08 |
| d451acbd63 |
| 7c14a79ef2 |
| 3d7b16d61d |
| a3131dc087 |
| 93940569fa |
| 1e24d7b2c9 |
| 109f537c14 |
| 62b796e6f6 |
| 8bae1abe20 |
| db60c78f32 |
| bdd7725f17 |
| 9ed3769765 |
| aa7157e27c |
| 13d5e1cd12 |
| 2c4698f119 |
| 759faee1c3 |
| 50808c73fe |
| fe17f5f597 |
| b62c724977 |
| 7252d403ae |
| 3180315cff |
| 9da6e56fcf |
| c300935170 |
| 6f0209ddb7 |
| a56e1ac71a |
| 0c44a934f4 |
| d1151e91f3 |
| cbe94c1ea5 |
| b809f5a8ee |
| 756d9c160d |
| a0e132b30e |
| d645e89be9 |
| a73a08d704 |
| 228164765b |
| 4c9c78c599 |
| 4b3c154049 |
| be0037eda5 |
| 367fae95c4 |
| 96ffc3bf3f |
| 285c4cc5ea |
| f07aceb0dd |
| dcacce4577 |
| fc5b3051fa |
| 1b2958a7cc |
| a20e1854d1 |
| fe21cb44f6 |
| 71ec0bd6a3 |
| 8bff623393 |
| 3eabd0a00c |
| 73186cde48 |
| 25e04fc947 |
| 6b7201856c |
| 608b83cfd8 |
| 40a42ca457 |
| 423c8a787e |
| 6cefbed7f7 |
| 5836a7dbf2 |
.editorconfig

@@ -13,4 +13,4 @@ indent_size = 4

[*.{yml,yaml}]
indent_style = space
-indent_size = 2
+indent_size = 2

.github/PULL_REQUEST_TEMPLATE.md (2 changes, vendored)
@@ -1,3 +1,5 @@
+<!--
+Please give your PR a title in the form "area: short description". For example "cpu: reduce usage by 95%" or "docs: fix typo in installation.md".
.github/workflows/lint.yml (24 changes, vendored)
@@ -4,27 +4,13 @@ name: Linting

# have been changed.
on:
  push:
    paths:
      - "go.mod"
      - "go.sum"
      - "**.go"
      - ".github/workflows/lint.yml"
      - "tools/e2e-output.txt"
    branches:
      - master
      - next
      - main
      - "0.*"
      - "1.*"
  pull_request:
    paths:
      - "go.mod"
      - "go.sum"
      - "**.go"
      - ".github/workflows/lint.yml"
      - "tools/e2e-output.txt"
    branches:
      - master
      - next
      - main

env:
  VERSION_PROMU: '0.14.0'
@@ -32,7 +18,7 @@ env:

jobs:
  test:
-    runs-on: windows-2019
+    runs-on: windows-2022
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
@@ -55,7 +41,7 @@ jobs:
        run: make e2e-test

  promtool:
-    runs-on: windows-2019
+    runs-on: windows-2022
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
@@ -105,4 +91,4 @@ jobs:
        uses: golangci/golangci-lint-action@v6
        with:
          version: v1.60
-          args: "--max-same-issues=0"
+          args: "--max-same-issues=0"

.github/workflows/pr-check.yaml (2 changes, vendored)
@@ -37,7 +37,7 @@ jobs:
      - name: check
        run: |
          PR_TITLE_PREFIX=$(echo "$PR_TITLE" | cut -d':' -f1)
-          if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then
+          if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "fix(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Release"* ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]] || [[ "$PR_TITLE_PREFIX" == "[0."* ]] || [[ "$PR_TITLE_PREFIX" == "[1."* ]]; then
            exit 0
          fi
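In effect, the loosened check now also accepts PR titles prefixed with `fix(deps)`, titles starting with `Release`, and release-branch titles starting with `[0.` or `[1.`, alongside the previously allowed prefixes.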
.github/workflows/release.yml (89 changes, vendored)
@@ -22,6 +22,7 @@ env:

jobs:
  build:
    runs-on: windows-2022
+    environment: build
    steps:
      - uses: actions/checkout@v4
        with:
@@ -32,13 +33,14 @@ jobs:
          go-version-file: 'go.mod'

      - name: Install WiX
-        run: dotnet tool install --global wix
+        run: |
+          dotnet tool install --global wix --version 5.0.2

      - name: Install WiX extensions
        run: |
-          wix extension add -g WixToolset.Util.wixext
-          wix extension add -g WixToolset.Ui.wixext
-          wix extension add -g WixToolset.Firewall.wixext
+          wix extension add -g WixToolset.Util.wixext/5.0.2
+          wix extension add -g WixToolset.Ui.wixext/5.0.2
+          wix extension add -g WixToolset.Firewall.wixext/5.0.2

      - name: Install Build deps
        run: |
@@ -68,6 +70,40 @@ jobs:

          Get-ChildItem -Path output

+      - name: Sign build artifacts
+        if: ${{ (github.event_name != 'pull_request' && github.repository == 'prometheus-community/windows_exporter') || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'prometheus-community/windows_exporter') }}
+        run: |
+          $ErrorActionPreference = "Stop"
+          $Version = Get-Content VERSION
+
+          $b64 = $env:CODE_SIGN_KEY
+          $filename = 'windows_exporter_CodeSign.pfx'
+
+          $bytes = [Convert]::FromBase64String($b64)
+          [IO.File]::WriteAllBytes($filename, $bytes)
+
+          $basePath = "C:\Program Files (x86)\Windows Kits\10\bin"
+          $latestSigntool = Get-ChildItem -Path $basePath -Directory |
+              Where-Object { $_.Name -match "^\d+\.\d+\.\d+\.\d+$" } |
+              Sort-Object { [Version]$_.Name } -Descending |
+              Select-Object -First 1 |
+              ForEach-Object { Join-Path $_.FullName "x64\signtool.exe" }
+
+          if (Test-Path $latestSigntool) {
+              Write-Output $latestSigntool
+          } else {
+              Write-Output "signtool.exe not found"
+          }
+
+          foreach($Arch in "amd64", "arm64") {
+            & $latestSigntool sign /v /tr "http://timestamp.digicert.com" /d "Prometheus exporter for Windows machines" /td SHA256 /fd SHA256 /a /f "windows_exporter_CodeSign.pfx" /p $env:CODE_SIGN_PASSWORD "output\windows_exporter-$Version-$Arch.exe"
+          }
+
+          rm windows_exporter_CodeSign.pfx
+        env:
+          CODE_SIGN_KEY: ${{ secrets.CODE_SIGN_KEY }}
+          CODE_SIGN_PASSWORD: ${{ secrets.CODE_SIGN_PASSWORD }}

      - name: Build Release Artifacts
        run: |
          $ErrorActionPreference = "Stop"
@@ -79,9 +115,46 @@ jobs:
          }

          Move-Item installer\*.msi output\
-          Get-ChildItem -Path output\
+          Get-ChildItem -Path output\ g

-          promu checksum output\

+      - name: Sign installer artifacts
+        if: ${{ (github.event_name != 'pull_request' && github.repository == 'prometheus-community/windows_exporter') || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'prometheus-community/windows_exporter') }}
+        run: |
+          $ErrorActionPreference = "Stop"
+          $Version = Get-Content VERSION
+
+          $b64 = $env:CODE_SIGN_KEY
+          $filename = 'windows_exporter_CodeSign.pfx'
+
+          $bytes = [Convert]::FromBase64String($b64)
+          [IO.File]::WriteAllBytes($filename, $bytes)
+
+          $basePath = "C:\Program Files (x86)\Windows Kits\10\bin"
+          $latestSigntool = Get-ChildItem -Path $basePath -Directory |
+              Where-Object { $_.Name -match "^\d+\.\d+\.\d+\.\d+$" } |
+              Sort-Object { [Version]$_.Name } -Descending |
+              Select-Object -First 1 |
+              ForEach-Object { Join-Path $_.FullName "x64\signtool.exe" }
+
+          if (Test-Path $latestSigntool) {
+              Write-Output $latestSigntool
+          } else {
+              Write-Output "signtool.exe not found"
+          }
+
+          foreach($Arch in "amd64", "arm64") {
+            & $latestSigntool sign /v /tr "http://timestamp.digicert.com" /d "Prometheus exporter for Windows machines" /td SHA256 /fd SHA256 /a /f "windows_exporter_CodeSign.pfx" /p $env:CODE_SIGN_PASSWORD "output\windows_exporter-$Version-$Arch.msi"
+          }
+
+          rm windows_exporter_CodeSign.pfx
+        env:
+          CODE_SIGN_KEY: ${{ secrets.CODE_SIGN_KEY }}
+          CODE_SIGN_PASSWORD: ${{ secrets.CODE_SIGN_PASSWORD }}
+
+      - name: Generate checksums
+        run: |
+          promu checksum output
+          cat output\sha256sums.txt

      - name: Upload Artifacts
        uses: actions/upload-artifact@v4
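The signatures applied above can be checked locally after downloading an artifact. A sketch (the file name is illustrative, and `signtool` verification only succeeds once the self-signed certificate is trusted as described in the README):

```powershell
# Inspect the Authenticode signature of a release binary (illustrative name).
Get-AuthenticodeSignature .\windows_exporter-amd64.exe | Format-List Status, StatusMessage, SignerCertificate

# Equivalent check with the Windows SDK's signtool on PATH:
signtool verify /v /pa .\windows_exporter-amd64.exe
```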
@@ -103,6 +176,9 @@ jobs:
    runs-on: ubuntu-latest
    needs:
      - build
+    env:
+      DOCKER_BUILD_SUMMARY: false
+      DOCKER_BUILD_RECORD_UPLOAD: false
    steps:
      - uses: actions/checkout@v4
        with:

@@ -165,3 +241,4 @@ jobs:
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          platforms: windows/amd64
+          annotations: ${{ steps.meta.outputs.labels }}

.github/workflows/stale-close.yml (2 changes, vendored)
@@ -11,7 +11,7 @@ jobs:
    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
+      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          # opt out of defaults to avoid marking issues as stale and closing them

.github/workflows/stale.yml (2 changes, vendored)
@@ -11,7 +11,7 @@ jobs:
    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
+      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          # opt out of defaults to avoid marking issues as stale and closing them

README.md (13 changes)
@@ -33,7 +33,6 @@ Name | Description | Enabled by default
[iis](docs/collector.iis.md) | IIS sites and applications |
[license](docs/collector.license.md) | Windows license status |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓
-[logon](docs/collector.logon.md) | User logon sessions |
[memory](docs/collector.memory.md) | Memory usage metrics | ✓
[mscluster](docs/collector.mscluster.md) | MSCluster metrics |
[msmq](docs/collector.msmq.md) | MSMQ queues |
@@ -88,7 +87,7 @@ windows_exporter accepts flags to configure certain behaviours. The ones configu
| `--web.listen-address` | host:port for exporter. | `:9182` |
| `--telemetry.path` | URL path for surfacing collected metrics. | `/metrics` |
| `--telemetry.max-requests` | Maximum number of concurrent requests. 0 to disable. | `5` |
-| `--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default." | `[defaults]` |
+| `--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default. | `[defaults]` |
| `--collectors.print` | If true, print available collectors and exit. | |
| `--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5` |
| `--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None |
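As an illustration of the `[defaults]` placeholder (the added collector name here is hypothetical), you can extend the default set instead of enumerating every default collector:

```powershell
# Enable all default collectors plus the textfile collector (illustrative).
.\windows_exporter.exe --collectors.enabled "[defaults],textfile"
```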
@@ -100,6 +99,9 @@ windows_exporter accepts flags to configure certain behaviours. The ones configu

The latest release can be downloaded from the [releases page](https://github.com/prometheus-community/windows_exporter/releases).

+All binaries and installation packages are signed with a self-signed certificate. The public key can be found [here](https://github.com/prometheus-community/windows_exporter/blob/master/installer/codesign.cer).
+Once imported into the trusted root certificate store, the binaries and installation packages will be trusted.

Each release provides a .msi installer. The installer will set up windows_exporter as a Windows service, as well as create an exception in the Windows Firewall.

If the installer is run without any parameters, the exporter will run with default settings for enabled collectors, ports, etc.
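A minimal sketch of trusting that certificate, assuming `codesign.cer` has been downloaded from the link above and you are in an elevated PowerShell session (`Import-Certificate` is part of the standard PKI module):

```powershell
# Import the exporter's self-signed code-signing certificate into the
# machine-wide Trusted Root store so signed binaries and installers validate.
Import-Certificate -FilePath .\codesign.cer -CertStoreLocation Cert:\LocalMachine\Root
```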
@@ -125,6 +127,8 @@ The following parameters are available:

| `EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string. For `--collectors.enabled` and `--config.file`, use the specialized properties `ENABLED_COLLECTORS` and `CONFIG_FILE` |
| `ADDLOCAL` | Enables features within the windows_exporter installer. Supported values: `FirewallException` |
| `REMOVE` | Disables features within the windows_exporter installer. Supported values: `FirewallException` |
+| `APPLICATIONFOLDER` | Directory to install windows_exporter. Defaults to `C:\Program Files\windows_exporter` |

Parameters are sent to the installer via `msiexec`.
On PowerShell, the `--%` should be passed before defining properties.
@@ -145,6 +149,11 @@ Define a config file.

msiexec /i <path-to-msi-file> --% CONFIG_FILE="D:\config.yaml"
```

+Alternative install directory
+```powershell
+msiexec /i <path-to-msi-file> --% ADDLOCAL=FirewallException APPLICATIONFOLDER="F:\Program Files\windows_exporter"
+```

On some older versions of Windows,
you may need to surround parameter values with double quotes to get the installation command parsing properly:
```powershell
@@ -14,8 +14,11 @@

package main

import (
+	"errors"
	"fmt"
	"os"
+	"strings"
+	"unsafe"

	"golang.org/x/sys/windows"
	"golang.org/x/sys/windows/svc"
@@ -33,6 +36,9 @@ var (

	// stopCh is a channel to send a signal to the service manager that the service is stopping.
	stopCh = make(chan struct{})

+	// serviceManagerFinishedCh is a channel to send a signal to the main function that the service manager has stopped the service.
+	serviceManagerFinishedCh = make(chan struct{}, 1)
)

// IsService variable declaration allows initiating time-sensitive components like registering the Windows service
@@ -49,33 +55,37 @@
//
//nolint:gochecknoglobals
var IsService = func() bool {
-	defer func() {
-		go func() {
-			err := svc.Run(serviceName, &windowsExporterService{})
-			if err == nil {
-				return
-			}
-
-			_ = logToEventToLog(windows.EVENTLOG_ERROR_TYPE, fmt.Sprintf("failed to start service: %v", err))
-		}()
-	}()
-
-	var err error
-
-	isService, err := svc.IsWindowsService()
+	isService, err := isWindowsService()
	if err != nil {
-		_ = logToEventToLog(windows.EVENTLOG_ERROR_TYPE, fmt.Sprintf("failed to detect service: %v", err))
+		logToFile(fmt.Sprintf("failed to detect service: %v", err))

		exitCodeCh <- 1

		return false
	}

	if !isService {
		return false
	}

+	defer func() {
+		go func() {
+			err := svc.Run(serviceName, &windowsExporterService{})
+			if err != nil {
+				// https://github.com/open-telemetry/opentelemetry-collector/pull/9042
+				if !errors.Is(err, windows.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT) {
+					if logErr := logToEventToLog(windows.EVENTLOG_ERROR_TYPE, fmt.Sprintf("failed to start service: %v", err)); logErr != nil {
+						logToFile(fmt.Sprintf("failed to start service: %v", err))
+					}
+				}
+			}
+
+			serviceManagerFinishedCh <- struct{}{}
+		}()
+	}()

	if err := logToEventToLog(windows.EVENTLOG_INFORMATION_TYPE, "attempting to start exporter service"); err != nil {
-		//nolint:gosec
-		_ = os.WriteFile("C:\\Program Files\\windows_exporter\\start-service.error.log", []byte(fmt.Sprintf("failed sent log to event log: %v", err)), 0o644)
+		logToFile(fmt.Sprintf("failed sent log to event log: %v", err))

		exitCodeCh <- 2
	}
@@ -122,7 +132,7 @@ func (s *windowsExporterService) Execute(_ []string, r <-chan svc.ChangeRequest,

// logToEventToLog logs a message to the Windows event log.
func logToEventToLog(eType uint16, msg string) error {
-	eventLog, err := eventlog.Open("windows_exporter")
+	eventLog, err := eventlog.Open(serviceName)
	if err != nil {
		return fmt.Errorf("failed to open event log: %w", err)
	}
@@ -130,18 +140,70 @@ func logToEventToLog(eType uint16, msg string) error {
		_ = eventLog.Close()
	}(eventLog)

-	p, err := windows.UTF16PtrFromString(msg)
-	if err != nil {
-		return fmt.Errorf("error convert string to UTF-16: %w", err)
+	switch eType {
+	case windows.EVENTLOG_ERROR_TYPE:
+		err = eventLog.Error(102, msg)
+	case windows.EVENTLOG_WARNING_TYPE:
+		err = eventLog.Warning(101, msg)
+	case windows.EVENTLOG_INFORMATION_TYPE:
+		err = eventLog.Info(100, msg)
	}

-	zero := uint16(0)
-	ss := []*uint16{p, &zero, &zero, &zero, &zero, &zero, &zero, &zero, &zero}
-
-	err = windows.ReportEvent(eventLog.Handle, eType, 0, 3299, 0, 9, 0, &ss[0], nil)
	if err != nil {
		return fmt.Errorf("error report event: %w", err)
	}

	return nil
}

+func logToFile(msg string) {
+	if file, err := os.CreateTemp("", "windows_exporter.service.error.log"); err == nil {
+		_, _ = file.WriteString(msg)
+		_ = file.Close()
+	}
+}
+
+// isWindowsService is a clone of "golang.org/x/sys/windows/svc:IsWindowsService", but with a fix
+// for Windows containers.
+// Go cloned the .NET implementation of this function, which has since
+// been patched to support Windows containers, which don't use Session ID 0 for services.
+// https://github.com/dotnet/runtime/pull/74188
+// This function can be replaced with go's once go brings in the fix.
+//
+// Copyright 2023-present Datadog, Inc.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// https://github.com/DataDog/datadog-agent/blob/46740e82ef40a04c4be545ed8c16a4b0d1f046cf/pkg/util/winutil/servicemain/servicemain.go#L128
+func isWindowsService() (bool, error) {
+	var currentProcess windows.PROCESS_BASIC_INFORMATION
+	infoSize := uint32(unsafe.Sizeof(currentProcess))
+
+	err := windows.NtQueryInformationProcess(windows.CurrentProcess(), windows.ProcessBasicInformation, unsafe.Pointer(&currentProcess), infoSize, &infoSize)
+	if err != nil {
+		return false, err
+	}
+
+	var parentProcess *windows.SYSTEM_PROCESS_INFORMATION
+
+	for infoSize = uint32((unsafe.Sizeof(*parentProcess) + unsafe.Sizeof(uintptr(0))) * 1024); ; {
+		parentProcess = (*windows.SYSTEM_PROCESS_INFORMATION)(unsafe.Pointer(&make([]byte, infoSize)[0]))
+
+		err = windows.NtQuerySystemInformation(windows.SystemProcessInformation, unsafe.Pointer(parentProcess), infoSize, &infoSize)
+		if err == nil {
+			break
+		} else if !errors.Is(err, windows.STATUS_INFO_LENGTH_MISMATCH) {
+			return false, err
+		}
+	}
+
+	for ; ; parentProcess = (*windows.SYSTEM_PROCESS_INFORMATION)(unsafe.Pointer(uintptr(unsafe.Pointer(parentProcess)) + uintptr(parentProcess.NextEntryOffset))) {
+		if parentProcess.UniqueProcessID == currentProcess.InheritedFromUniqueProcessId {
+			return strings.EqualFold("services.exe", parentProcess.ImageName.String()), nil
+		}
+
+		if parentProcess.NextEntryOffset == 0 {
+			break
+		}
+	}
+
+	return false, nil
+}
@@ -47,7 +47,11 @@ import (
)

func main() {
-	exitCode := run()
+	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
+
+	exitCode := run(ctx, os.Args[1:])
+
+	stop()

	// If we are running as a service, we need to signal the service control manager that we are done.
	if !IsService {
@@ -57,12 +61,11 @@ func main() {
	exitCodeCh <- exitCode

	// Wait for the service control manager to signal that we are done.
-	<-stopCh
+	<-serviceManagerFinishedCh
}

-func run() int {
+func run(ctx context.Context, args []string) int {
	startTime := time.Now()
-	ctx := context.Background()

	app := kingpin.New("windows_exporter", "A metrics collector for Windows.")
@@ -71,7 +74,7 @@ func run() int {
		"config.file",
		"YAML configuration file to use. Values set in this file will be overridden by CLI flags.",
	).String()
-	insecureSkipVerify = app.Flag(
+	_ = app.Flag(
		"config.file.insecure-skip-verify",
		"Skip TLS verification in loading YAML configuration.",
	).Default("false").Bool()
@@ -122,11 +125,10 @@ func run() int {
	// Initialize collectors before loading and parsing CLI arguments
	collectors := collector.NewWithFlags(app)

-	// Load values from configuration file(s). Executable flags must first be parsed, in order
-	// to load the specified file(s).
-	if _, err := app.Parse(os.Args[1:]); err != nil {
+	//nolint:contextcheck
+	if err := config.Parse(app, args); err != nil {
		//nolint:sloglint // we do not have an logger yet
-		slog.Error("Failed to parse CLI args",
+		slog.LogAttrs(ctx, slog.LevelError, "Failed to load configuration",
			slog.Any("err", err),
		)
@@ -137,61 +139,21 @@ func run() int {

	logger, err := log.New(logConfig)
	if err != nil {
		//nolint:sloglint // we do not have an logger yet
-		slog.Error("failed to create logger",
+		logger.LogAttrs(ctx, slog.LevelError, "failed to create logger",
			slog.Any("err", err),
		)

		return 1
	}

-	if *configFile != "" {
-		resolver, err := config.NewResolver(ctx, *configFile, logger, *insecureSkipVerify)
-		if err != nil {
-			logger.Error("could not load config file",
-				slog.Any("err", err),
-			)
-
-			return 1
-		}
-
-		if err = resolver.Bind(app, os.Args[1:]); err != nil {
-			logger.ErrorContext(ctx, "failed to bind configuration",
-				slog.Any("err", err),
-			)
-
-			return 1
-		}
-
-		// NOTE: This is temporary fix for issue #1092, calling kingpin.Parse
-		// twice makes slices flags duplicate its value, this clean up
-		// the first parse before the second call.
-		*webConfig.WebListenAddresses = (*webConfig.WebListenAddresses)[1:]
-
-		// Parse flags once more to include those discovered in configuration file(s).
-		if _, err = app.Parse(os.Args[1:]); err != nil {
-			logger.ErrorContext(ctx, "failed to parse CLI args from YAML file",
-				slog.Any("err", err),
-			)
-
-			return 1
-		}
-
-		logger, err = log.New(logConfig)
-		if err != nil {
-			//nolint:sloglint // we do not have an logger yet
-			slog.Error("failed to create logger",
-				slog.Any("err", err),
-			)
-
-			return 1
-		}
+	if configFile != nil && *configFile != "" {
+		logger.InfoContext(ctx, "using configuration file: "+*configFile)
	}

	logger.LogAttrs(ctx, slog.LevelDebug, "logging has Started")

-	if err = setPriorityWindows(logger, os.Getpid(), *processPriority); err != nil {
-		logger.Error("failed to set process priority",
+	if err = setPriorityWindows(ctx, logger, os.Getpid(), *processPriority); err != nil {
+		logger.LogAttrs(ctx, slog.LevelError, "failed to set process priority",
			slog.Any("err", err),
		)
@@ -200,7 +162,7 @@ func run() int {

	enabledCollectorList := expandEnabledCollectors(*enabledCollectors)
	if err := collectors.Enable(enabledCollectorList); err != nil {
-		logger.Error("couldn't enable collectors",
+		logger.LogAttrs(ctx, slog.LevelError, "couldn't enable collectors",
			slog.Any("err", err),
		)
@@ -208,9 +170,9 @@ func run() int {
	}

	// Initialize collectors before loading
-	if err = collectors.Build(logger); err != nil {
+	if err = collectors.Build(ctx, logger); err != nil {
		for _, err := range utils.SplitError(err) {
-			logger.Error("couldn't initialize collector",
+			logger.LogAttrs(ctx, slog.LevelError, "couldn't initialize collector",
				slog.Any("err", err),
			)
@@ -265,17 +227,14 @@ func run() int {
		close(errCh)
	}()

-	ctx, stop := signal.NotifyContext(ctx, os.Interrupt, os.Kill)
-	defer stop()
-
	select {
	case <-ctx.Done():
-		logger.Info("Shutting down windows_exporter via kill signal")
+		logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via kill signal")
	case <-stopCh:
-		logger.Info("Shutting down windows_exporter via service control")
+		logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via service control")
	case err := <-errCh:
		if err != nil {
-			logger.ErrorContext(ctx, "Failed to start windows_exporter",
+			logger.LogAttrs(ctx, slog.LevelError, "Failed to start windows_exporter",
				slog.Any("err", err),
			)
@@ -286,9 +245,9 @@ func run() int {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

-	_ = server.Shutdown(ctx)
+	_ = server.Shutdown(ctx) //nolint:contextcheck // create a new context for server shutdown

-	logger.InfoContext(ctx, "windows_exporter has shut down")
+	logger.LogAttrs(ctx, slog.LevelInfo, "windows_exporter has shut down") //nolint:contextcheck

	return 0
}
@@ -311,7 +270,7 @@ func logCurrentUser(logger *slog.Logger) {
}

// setPriorityWindows sets the priority of the current process to the specified value.
-func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
+func setPriorityWindows(ctx context.Context, logger *slog.Logger, pid int, priority string) error {
	// Mapping of priority names to uint32 values required by windows.SetPriorityClass.
	priorityStringToInt := map[string]uint32{
		"realtime": windows.REALTIME_PRIORITY_CLASS,

@@ -325,11 +284,11 @@ func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
	winPriority, ok := priorityStringToInt[priority]

	// Only set process priority if a non-default and valid value has been set
-	if !ok || winPriority != windows.NORMAL_PRIORITY_CLASS {
+	if !ok || winPriority == windows.NORMAL_PRIORITY_CLASS {
		return nil
	}

-	logger.LogAttrs(context.Background(), slog.LevelDebug, "setting process priority to "+priority)
+	logger.LogAttrs(ctx, slog.LevelDebug, "setting process priority to "+priority)

	// https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
	handle, err := windows.OpenProcess(
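Note the flipped comparison in that last hunk: with the old `!=` guard, the function returned early for every valid priority other than `normal`, so a requested non-default priority was never actually applied; the corrected `==` returns early only for the default class and lets all other valid priorities take effect.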
cmd/windows_exporter/main_test.go (new file, 188 lines)
@@ -0,0 +1,188 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build windows

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"golang.org/x/sys/windows"
)

//nolint:tparallel
func TestRun(t *testing.T) {
	t.Parallel()

	for _, tc := range []struct {
		name            string
		args            []string
		config          string
		metricsEndpoint string
		exitCode        int
	}{
		{
			name:            "default",
			args:            []string{},
			metricsEndpoint: "http://127.0.0.1:9182/metrics",
		},
		{
			name:            "web.listen-address",
			args:            []string{"--web.listen-address=127.0.0.1:8080"},
			metricsEndpoint: "http://127.0.0.1:8080/metrics",
		},
		{
			name:            "web.listen-address",
			args:            []string{"--web.listen-address=127.0.0.1:8081", "--web.listen-address=[::1]:8081"},
			metricsEndpoint: "http://[::1]:8081/metrics",
		},
		{
			name:            "config",
			args:            []string{"--config.file=config.yaml"},
			config:          `{"web":{"listen-address":"127.0.0.1:8082"}}`,
			metricsEndpoint: "http://127.0.0.1:8082/metrics",
		},
		{
			name:            "web.listen-address with config",
			args:            []string{"--config.file=config.yaml", "--web.listen-address=127.0.0.1:8084"},
			config:          `{"web":{"listen-address":"127.0.0.1:8083"}}`,
			metricsEndpoint: "http://127.0.0.1:8084/metrics",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			if tc.config != "" {
				// Create a temporary config file.
				tmpfile, err := os.CreateTemp(t.TempDir(), "config-*.yaml")
				require.NoError(t, err)

				t.Cleanup(func() {
					require.NoError(t, tmpfile.Close())
				})

				_, err = tmpfile.WriteString(tc.config)
				require.NoError(t, err)

				for i, arg := range tc.args {
					tc.args[i] = strings.ReplaceAll(arg, "config.yaml", tmpfile.Name())
				}
			}

			exitCodeCh := make(chan int)

			var stdout string

			go func() {
				stdout = captureOutput(t, func() {
					// Simulate the service control manager signaling that we are done.
					exitCodeCh <- run(ctx, tc.args)
				})
			}()

			t.Cleanup(func() {
				select {
				case exitCode := <-exitCodeCh:
					require.Equal(t, tc.exitCode, exitCode)
				case <-time.After(2 * time.Second):
					t.Fatalf("timed out waiting for exit code, want %d", tc.exitCode)
				}
			})

			if tc.exitCode != 0 {
				return
			}

			uri, err := url.Parse(tc.metricsEndpoint)
			require.NoError(t, err)

			err = waitUntilListening(t, "tcp", uri.Host)
			require.NoError(t, err, "LOGS:\n%s", stdout)

			req, err := http.NewRequestWithContext(ctx, http.MethodGet, tc.metricsEndpoint, nil)
			require.NoError(t, err)

			resp, err := http.DefaultClient.Do(req)
			require.NoError(t, err, "LOGS:\n%s", stdout)
			require.Equal(t, http.StatusOK, resp.StatusCode)

			body, err := io.ReadAll(resp.Body)
			require.NoError(t, err)

			err = resp.Body.Close()
			require.NoError(t, err)

			require.NotEmpty(t, body)
			require.Contains(t, string(body), "# HELP windows_exporter_build_info")

			cancel()
		})
	}
}

func captureOutput(tb testing.TB, f func()) string {
	tb.Helper()

	orig := os.Stdout
	r, w, _ := os.Pipe()
	os.Stdout = w

	f()

	os.Stdout = orig

	_ = w.Close()

	out, _ := io.ReadAll(r)

	return string(out)
}

func waitUntilListening(tb testing.TB, network, address string) error {
	tb.Helper()

	var (
		conn net.Conn
		err  error
	)

	for range 10 {
		conn, err = net.DialTimeout(network, address, 100*time.Millisecond)
		if err == nil {
			_ = conn.Close()

			return nil
		}

		if errors.Is(err, windows.Errno(10061)) {
			time.Sleep(50 * time.Millisecond)

			continue
		}
	}

	return fmt.Errorf("listener not listening: %w", err)
}
config.yaml (25 changes)
@@ -1,23 +1,2 @@

# example configuration file for windows_exporter

collectors:
  enabled: cpu,cpu_info,exchange,iis,logical_disk,logon,memory,net,os,performancecounter,process,remote_fx,service,system,tcp,time,terminal_services,textfile
collector:
  service:
    include: "windows_exporter"
  performancecounter:
    objects: |-
      - name: photon_udp
        object: "Photon Socket Server: UDP"
        instances: ["*"]
        counters:
          - name: "UDP: Datagrams in"
            metric: "photon_udp_datagrams"
            labels:
              direction: "in"
          - name: "UDP: Datagrams out"
            metric: "photon_udp_datagrams"
            labels:
              direction: "out"
log:
  level: warn
web:
  listen-address: ":9183"
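A usage sketch for an example configuration like this one, relying on the `--config.file` flag documented in the README above (the path is wherever you saved the file):

```powershell
.\windows_exporter.exe --config.file=C:\config.yaml
```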
docs/collector.dhcp.md

@@ -2,49 +2,177 @@

The dhcp collector exposes DHCP Server metrics

-|||
--|-
-Metric name prefix | `dhcp`
-Data source | Perflib
-Classes | `DHCP Server`
-Enabled by default? | No
+| | |
+|---------------------|---------------|
+| Metric name prefix | `dhcp` |
+| Data source | Perflib |
+| Classes | `DHCP Server` |
+| Enabled by default? | No |

## Flags

-None
+### `--collector.dhcp.enabled`
+
+Comma-separated list of collectors to use. Defaults to all, if not specified.

## Metrics

-Name | Description | Type | Labels
------|-------------|------|-------
-`packets_received_total` | Total number of packets received by the DHCP server | counter | None
-`duplicates_dropped_total` | Total number of duplicate packets received by the DHCP server | counter | None
-`packets_expired_total` | Total number of packets expired in the DHCP server message queue | counter | None
-`active_queue_length` | Number of packets in the processing queue of the DHCP server | gauge | None
-`conflict_check_queue_length` | Number of packets in the DHCP server queue waiting on conflict detection (ping) | gauge | None
-`discovers_total` | Total DHCP Discovers received by the DHCP server | counter | None
-`offers_total` | Total DHCP Offers sent by the DHCP server | counter | None
-`requests_total` | Total DHCP Requests received by the DHCP server | counter | None
-`informs_total` | Total DHCP Informs received by the DHCP server | counter | None
-`acks_total` | Total DHCP Acks sent by the DHCP server | counter | None
-`nacks_total` | Total DHCP Nacks sent by the DHCP server | counter | None
-`declines_total` | Total DHCP Declines received by the DHCP server | counter | None
-`releases_total` | Total DHCP Releases received by the DHCP server | counter | None
-`offer_queue_length` | Number of packets in the offer queue of the DHCP server | gauge | None
-`denied_due_to_match_total` | Total number of DHCP requests denied, based on matches from the Deny List | gauge | None
-`denied_due_to_nonmatch_total` | Total number of DHCP requests denied, based on non-matches from the Allow List | gauge | None
-`failover_bndupd_sent_total` | Number of DHCP failover Binding Update messages sent | counter | None
-`failover_bndupd_received_total` | Number of DHCP failover Binding Update messages received | counter | None
-`failover_bndack_sent_total` | Number of DHCP failover Binding Ack messages sent | counter | None
-`failover_bndack_received_total` | Number of DHCP failover Binding Ack messages received | counter | None
-`failover_bndupd_pending_in_outbound_queue` | Number of pending outbound DHCP failover Binding Update messages | counter | None
-`failover_transitions_communicationinterrupted_state_total` | Total number of transitions into COMMUNICATION INTERRUPTED state | counter | None
-`failover_transitions_partnerdown_state_total` | Total number of transitions into PARTNER DOWN state | counter | None
-`failover_transitions_recover_total` | Total number of transitions into RECOVER state | counter | None
-`failover_bndupd_dropped_total` | Total number of DHCP faileover Binding Updates dropped | counter | None
+| Name | Description | Type | Labels |
+|--------------------------------------------------------------------------|--------------------------------------------------------------------------------|---------|-----------------------------------------------------|
+| `windows_dhcp_ack_total` | Total DHCP Acks sent by the DHCP server | counter | None |
+| `windows_dhcp_denied_due_to_match_total` | Total number of DHCP requests denied, based on matches from the Deny List | gauge | None |
+| `windows_dhcp_denied_due_to_nonmatch_total` | Total number of DHCP requests denied, based on non-matches from the Allow List | gauge | None |
+| `windows_dhcp_declines_total` | Total DHCP Declines received by the DHCP server | counter | None |
+| `windows_dhcp_discovers_total` | Total DHCP Discovers received by the DHCP server | counter | None |
+| `windows_dhcp_failover_bndack_received_total` | Number of DHCP failover Binding Ack messages received | counter | None |
+| `windows_dhcp_failover_bndack_sent_total` | Number of DHCP failover Binding Ack messages sent | counter | None |
+| `windows_dhcp_failover_bndupd_dropped_total` | Total number of DHCP failover Binding Updates dropped | counter | None |
+| `windows_dhcp_failover_bndupd_received_total` | Number of DHCP failover Binding Update messages received | counter | None |
+| `windows_dhcp_failover_bndupd_sent_total` | Number of DHCP failover Binding Update messages sent | counter | None |
+| `windows_dhcp_failover_bndupd_pending_in_outbound_queue` | Number of pending outbound DHCP failover Binding Update messages | counter | None |
+| `windows_dhcp_failover_transitions_communicationinterrupted_state_total` | Total number of transitions into COMMUNICATION INTERRUPTED state | counter | None |
+| `windows_dhcp_failover_transitions_partnerdown_state_total` | Total number of transitions into PARTNER DOWN state | counter | None |
+| `windows_dhcp_failover_transitions_recover_total` | Total number of transitions into RECOVER state | counter | None |
+| `windows_dhcp_informs_total` | Total DHCP Informs received by the DHCP server | counter | None |
+| `windows_dhcp_nacks_total` | Total DHCP Nacks sent by the DHCP server | counter | None |
+| `windows_dhcp_offers_total` | Total DHCP Offers sent by the DHCP server | counter | None |
+| `windows_dhcp_packets_expired_total` | Total number of packets expired in the DHCP server message queue | counter | None |
+| `windows_dhcp_packets_received_total` | Total number of packets received by the DHCP server | counter | None |
+| `windows_dhcp_pending_offers_total` | Total number of pending offers in the DHCP server | counter | None |
+| `windows_dhcp_releases_total` | Total DHCP Releases received by the DHCP server | counter | None |
+| `windows_dhcp_requests_total` | Total DHCP Requests received by the DHCP server | counter | None |
+| `windows_dhcp_scope_addresses_free_on_this_server` | DHCP Scope free addresses on this server | gauge | `scope` |
+| `windows_dhcp_scope_addresses_free_on_partner_server` | DHCP Scope free addresses on partner server | gauge | `scope` |
+| `windows_dhcp_scope_addresses_free` | DHCP Scope free addresses | gauge | `scope` |
+| `windows_dhcp_scope_addresses_in_use_on_this_server` | DHCP Scope addresses in use on this server | gauge | `scope` |
+| `windows_dhcp_scope_addresses_in_use_on_partner_server` | DHCP Scope addresses in use on partner server | gauge | `scope` |
+| `windows_dhcp_scope_addresses_in_use` | DHCP Scope addresses in use | gauge | `scope` |
+| `windows_dhcp_scope_info` | DHCP Scope information | gauge | `name`, `superscope_name`, `superscope_id`, `scope` |
+| `windows_dhcp_scope_pending_offers` | DHCP Scope pending offers | gauge | `scope` |
+| `windows_dhcp_scope_reserved_address` | DHCP Scope reserved addresses | gauge | `scope` |
+| `windows_dhcp_scope_state` | DHCP Scope state | gauge | `scope`, `state` |
### Example metric

-_This collector does not yet have explained examples, we would appreciate your help adding them!_

```
# HELP windows_dhcp_acks_total Total DHCP Acks sent by the DHCP server (AcksTotal)
# TYPE windows_dhcp_acks_total counter
windows_dhcp_acks_total 0
# HELP windows_dhcp_active_queue_length Number of packets in the processing queue of the DHCP server (ActiveQueueLength)
# TYPE windows_dhcp_active_queue_length gauge
windows_dhcp_active_queue_length 0
# HELP windows_dhcp_conflict_check_queue_length Number of packets in the DHCP server queue waiting on conflict detection (ping). (ConflictCheckQueueLength)
# TYPE windows_dhcp_conflict_check_queue_length gauge
windows_dhcp_conflict_check_queue_length 0
# HELP windows_dhcp_declines_total Total DHCP Declines received by the DHCP server (DeclinesTotal)
# TYPE windows_dhcp_declines_total counter
windows_dhcp_declines_total 0
# HELP windows_dhcp_denied_due_to_match_total Total number of DHCP requests denied, based on matches from the Deny list (DeniedDueToMatch)
# TYPE windows_dhcp_denied_due_to_match_total counter
windows_dhcp_denied_due_to_match_total 0
# HELP windows_dhcp_denied_due_to_nonmatch_total Total number of DHCP requests denied, based on non-matches from the Allow list (DeniedDueToNonMatch)
# TYPE windows_dhcp_denied_due_to_nonmatch_total counter
windows_dhcp_denied_due_to_nonmatch_total 0
# HELP windows_dhcp_discovers_total Total DHCP Discovers received by the DHCP server (DiscoversTotal)
# TYPE windows_dhcp_discovers_total counter
windows_dhcp_discovers_total 0
# HELP windows_dhcp_duplicates_dropped_total Total number of duplicate packets received by the DHCP server (DuplicatesDroppedTotal)
# TYPE windows_dhcp_duplicates_dropped_total counter
windows_dhcp_duplicates_dropped_total 0
# HELP windows_dhcp_failover_bndack_received_total Number of DHCP fail over Binding Ack messages received (FailoverBndackReceivedTotal)
# TYPE windows_dhcp_failover_bndack_received_total counter
windows_dhcp_failover_bndack_received_total 0
# HELP windows_dhcp_failover_bndack_sent_total Number of DHCP fail over Binding Ack messages sent (FailoverBndackSentTotal)
# TYPE windows_dhcp_failover_bndack_sent_total counter
windows_dhcp_failover_bndack_sent_total 0
# HELP windows_dhcp_failover_bndupd_dropped_total Total number of DHCP fail over Binding Updates dropped (FailoverBndupdDropped)
# TYPE windows_dhcp_failover_bndupd_dropped_total counter
windows_dhcp_failover_bndupd_dropped_total 0
# HELP windows_dhcp_failover_bndupd_pending_in_outbound_queue Number of pending outbound DHCP fail over Binding Update messages (FailoverBndupdPendingOutboundQueue)
# TYPE windows_dhcp_failover_bndupd_pending_in_outbound_queue gauge
windows_dhcp_failover_bndupd_pending_in_outbound_queue 0
# HELP windows_dhcp_failover_bndupd_received_total Number of DHCP fail over Binding Update messages received (FailoverBndupdReceivedTotal)
# TYPE windows_dhcp_failover_bndupd_received_total counter
windows_dhcp_failover_bndupd_received_total 0
# HELP windows_dhcp_failover_bndupd_sent_total Number of DHCP fail over Binding Update messages sent (FailoverBndupdSentTotal)
# TYPE windows_dhcp_failover_bndupd_sent_total counter
windows_dhcp_failover_bndupd_sent_total 0
# HELP windows_dhcp_failover_transitions_communicationinterrupted_state_total Total number of transitions into COMMUNICATION INTERRUPTED state (FailoverTransitionsCommunicationinterruptedState)
# TYPE windows_dhcp_failover_transitions_communicationinterrupted_state_total counter
windows_dhcp_failover_transitions_communicationinterrupted_state_total 0
# HELP windows_dhcp_failover_transitions_partnerdown_state_total Total number of transitions into PARTNER DOWN state (FailoverTransitionsPartnerdownState)
# TYPE windows_dhcp_failover_transitions_partnerdown_state_total counter
windows_dhcp_failover_transitions_partnerdown_state_total 0
# HELP windows_dhcp_failover_transitions_recover_total Total number of transitions into RECOVER state (FailoverTransitionsRecoverState)
# TYPE windows_dhcp_failover_transitions_recover_total counter
windows_dhcp_failover_transitions_recover_total 0
# HELP windows_dhcp_informs_total Total DHCP Informs received by the DHCP server (InformsTotal)
# TYPE windows_dhcp_informs_total counter
windows_dhcp_informs_total 0
# HELP windows_dhcp_nacks_total Total DHCP Nacks sent by the DHCP server (NacksTotal)
# TYPE windows_dhcp_nacks_total counter
windows_dhcp_nacks_total 0
# HELP windows_dhcp_offer_queue_length Number of packets in the offer queue of the DHCP server (OfferQueueLength)
# TYPE windows_dhcp_offer_queue_length gauge
windows_dhcp_offer_queue_length 0
# HELP windows_dhcp_offers_total Total DHCP Offers sent by the DHCP server (OffersTotal)
# TYPE windows_dhcp_offers_total counter
windows_dhcp_offers_total 0
# HELP windows_dhcp_packets_expired_total Total number of packets expired in the DHCP server message queue (PacketsExpiredTotal)
# TYPE windows_dhcp_packets_expired_total counter
windows_dhcp_packets_expired_total 0
# HELP windows_dhcp_packets_received_total Total number of packets received by the DHCP server (PacketsReceivedTotal)
# TYPE windows_dhcp_packets_received_total counter
windows_dhcp_packets_received_total 0
# HELP windows_dhcp_releases_total Total DHCP Releases received by the DHCP server (ReleasesTotal)
# TYPE windows_dhcp_releases_total counter
windows_dhcp_releases_total 0
# HELP windows_dhcp_requests_total Total DHCP Requests received by the DHCP server (RequestsTotal)
# TYPE windows_dhcp_requests_total counter
windows_dhcp_requests_total 0
# HELP windows_dhcp_scope_addresses_free_total DHCP Scope free addresses
# TYPE windows_dhcp_scope_addresses_free_total gauge
windows_dhcp_scope_addresses_free_total{scope="10.11.12.0/25"} 0
windows_dhcp_scope_addresses_free_total{scope="172.16.0.0/24"} 0
windows_dhcp_scope_addresses_free_total{scope="192.168.0.0/24"} 231
# HELP windows_dhcp_scope_addresses_in_use_total DHCP Scope addresses in use
# TYPE windows_dhcp_scope_addresses_in_use_total gauge
windows_dhcp_scope_addresses_in_use_total{scope="10.11.12.0/25"} 0
windows_dhcp_scope_addresses_in_use_total{scope="172.16.0.0/24"} 0
windows_dhcp_scope_addresses_in_use_total{scope="192.168.0.0/24"} 0
# HELP windows_dhcp_scope_info DHCP Scope information
# TYPE windows_dhcp_scope_info gauge
windows_dhcp_scope_info{name="SUBSUPERSCOPE",scope="172.16.0.0/24",superscope_id="2",superscope_name="SUPERSCOPE"} 1
windows_dhcp_scope_info{name="TEST",scope="192.168.0.0/24",superscope_id="0",superscope_name=""} 1
windows_dhcp_scope_info{name="TEST2",scope="10.11.12.0/25",superscope_id="2",superscope_name="SUPERSCOPE"} 1
# HELP windows_dhcp_scope_pending_offers_total DHCP Scope pending offers
# TYPE windows_dhcp_scope_pending_offers_total gauge
windows_dhcp_scope_pending_offers_total{scope="10.11.12.0/25"} 0
windows_dhcp_scope_pending_offers_total{scope="172.16.0.0/24"} 0
windows_dhcp_scope_pending_offers_total{scope="192.168.0.0/24"} 0
# HELP windows_dhcp_scope_reserved_address_total DHCP Scope reserved addresses
# TYPE windows_dhcp_scope_reserved_address_total gauge
windows_dhcp_scope_reserved_address_total{scope="10.11.12.0/25"} 0
windows_dhcp_scope_reserved_address_total{scope="172.16.0.0/24"} 0
windows_dhcp_scope_reserved_address_total{scope="192.168.0.0/24"} 2
# HELP windows_dhcp_scope_state DHCP Scope state
# TYPE windows_dhcp_scope_state gauge
windows_dhcp_scope_state{scope="10.11.12.0/25",state="Disabled"} 1
windows_dhcp_scope_state{scope="10.11.12.0/25",state="DisabledSwitched"} 0
windows_dhcp_scope_state{scope="10.11.12.0/25",state="Enabled"} 0
windows_dhcp_scope_state{scope="10.11.12.0/25",state="EnabledSwitched"} 0
windows_dhcp_scope_state{scope="10.11.12.0/25",state="InvalidState"} 0
windows_dhcp_scope_state{scope="172.16.0.0/24",state="Disabled"} 1
windows_dhcp_scope_state{scope="172.16.0.0/24",state="DisabledSwitched"} 0
windows_dhcp_scope_state{scope="172.16.0.0/24",state="Enabled"} 0
windows_dhcp_scope_state{scope="172.16.0.0/24",state="EnabledSwitched"} 0
windows_dhcp_scope_state{scope="172.16.0.0/24",state="InvalidState"} 0
windows_dhcp_scope_state{scope="192.168.0.0/24",state="Disabled"} 0
windows_dhcp_scope_state{scope="192.168.0.0/24",state="DisabledSwitched"} 0
windows_dhcp_scope_state{scope="192.168.0.0/24",state="Enabled"} 1
windows_dhcp_scope_state{scope="192.168.0.0/24",state="EnabledSwitched"} 0
windows_dhcp_scope_state{scope="192.168.0.0/24",state="InvalidState"} 0
```

## Useful queries

_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
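As a sketch of one candidate query (the upstream section above is still empty): DHCP scope utilization in percent, built from the scope gauges in the metrics table. The example output shows these series with a `_total` suffix, so match whichever names your exporter version actually emits.

```promql
100 * windows_dhcp_scope_addresses_in_use
  / (windows_dhcp_scope_addresses_in_use + windows_dhcp_scope_addresses_free)
```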
docs/collector.dns.md

@@ -3,14 +3,19 @@

The dns collector exposes metrics about the DNS server

|||
--|-
-Metric name prefix | `dns`
-Classes | [`Win32_PerfRawData_DNS_DNS`](https://technet.microsoft.com/en-us/library/cc977686.aspx)
-Enabled by default? | No
+-|-|-
+Metric name prefix | `dns` |
+Classes | [`Win32_PerfRawData_DNS_DNS`](https://technet.microsoft.com/en-us/library/cc977686.aspx) |
+Enabled by default | Yes |
+Metric name prefix (error stats) | `windows_dns` |
+Classes | [`MicrosoftDNS_Statistic`](https://learn.microsoft.com/en-us/windows/win32/dns/dns-wmi-provider-overview) |
+Enabled by default (error stats)? | Yes |

## Flags

-None
+Name | Description
+-----|------------
+`collector.dns.enabled` | Comma-separated list of collectors to use. Available collectors: `metrics`, `error_stats`. Defaults to all collectors if not specified.

## Metrics
@@ -38,12 +43,56 @@ Name | Description | Type | Labels
`windows_dns_wins_queries_total` | _Not yet documented_ | counter | `direction`
`windows_dns_wins_responses_total` | _Not yet documented_ | counter | `direction`
`windows_dns_unmatched_responses_total` | _Not yet documented_ | counter | None
+`windows_dns_error_stats_total` | DNS error statistics from MicrosoftDNS_Statistic | counter | `name`, `collection_name`, `dns_server`

+### Sub-collectors
+
+The DNS collector is split into two sub-collectors:
+
+1. `metrics` - Collects standard DNS performance metrics using PDH (Performance Data Helper)
+2. `wmi_stats` - Collects DNS error statistics from the MicrosoftDNS_Statistic WMI class
+
+By default, both sub-collectors are enabled. You can enable specific sub-collectors using the `collector.dns.enabled` flag.
+
+### Example Usage
+
+To enable only DNS error statistics collection:
+```powershell
+windows_exporter.exe --collector.dns.enabled=wmi_stats
+```
+
+To enable only standard DNS metrics:
+```powershell
+windows_exporter.exe --collector.dns.enabled=metrics
+```
+
+To enable both (default behavior):
+```powershell
+windows_exporter.exe --collector.dns.enabled=metrics,wmi_stats
+```

### Example metric

-_This collector does not yet have explained examples, we would appreciate your help adding them!_

```
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="BadKey"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="BadSig"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="BadTime"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="FormError"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="Max"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NoError"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NotAuth"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NotImpl"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NotZone"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NxDomain"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="NxRRSet"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="Refused"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="ServFail"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="UnknownError"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="YxDomain"} 0
windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5NNM8M1",name="YxRRSet"} 0
```
## Useful queries

_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
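Again as a sketch rather than upstream guidance: the per-second rate of DNS errors, broken out by response code, based on the `windows_dns_wmi_stats_total` series shown in the example above.

```promql
rate(windows_dns_wmi_stats_total{collection_name="Error Stats"}[5m])
```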
## Alerting examples

-_This collector does not yet have alerting examples, we would appreciate your help adding them!_
+_This collector does not yet have alerting examples, we would appreciate your help adding them!_
docs/collector.exchange.md

@@ -43,7 +43,7 @@ Comma-separated list of collectors to use, for example: `--collectors.exchange.e
| `windows_exchange_transport_queues_messages_submitted_total` | Messages Submitted Total |
| `windows_exchange_transport_queues_messages_delayed_total` | Messages Delayed Total |
| `windows_exchange_transport_queues_messages_completed_delivery_total` | Messages Completed Delivery Total |
-| `windows_exchange_transport_queues_shadow_queue_length` | Shadow Queue Length |
+| `windows_exchange_transport_queues_aggregate_shadow_queue_length` | The current number of messages in shadow queues |
| `windows_exchange_transport_queues_submission_queue_length` | Submission Queue Length |
| `windows_exchange_transport_queues_delay_queue_length` | Delay Queue Length |
| `windows_exchange_transport_queues_items_completed_delivery_total` | Items Completed Delivery Total |
@@ -54,7 +54,7 @@ Comma-separated list of collectors to use, for example: `--collectors.exchange.e
|
||||
| `windows_exchange_http_proxy_avg_auth_latency` | Average time spent authenticating CAS requests over the last 200 samples |
|
||||
| `windows_exchange_http_proxy_outstanding_proxy_requests` | Number of concurrent outstanding proxy requests |
|
||||
| `windows_exchange_http_proxy_requests_total` | Number of proxy requests processed each second |
|
||||
| `windows_exchange_avail_service_requests_per_sec` | Number of requests serviced per second |
|
||||
| `windows_exchange_availability_service_requests_per_sec` | Number of requests serviced per second |
|
||||
| `windows_exchange_owa_current_unique_users` | Number of unique users currently logged on to Outlook Web App |
|
||||
| `windows_exchange_owa_requests_total` | Number of requests handled by Outlook Web App per second |
|
||||
| `windows_exchange_autodiscover_requests_total` | Number of autodiscover service requests processed each second |
|
||||
@@ -77,4 +77,3 @@ _This collector does not yet have any useful queries added, we would appreciate
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
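
A sketch of one possible rule over the transport-queue metrics listed above (the threshold is an arbitrary assumption):

```yaml
groups:
  - name: Windows Exchange Alerts
    rules:
      # Fires when the submission queue stays long, which may indicate stalled delivery.
      - alert: ExchangeSubmissionQueueBacklog
        expr: windows_exchange_transport_queues_submission_queue_length > 100
        for: 15m
        labels:
          severity: warning
        annotations:
          summary: "Exchange submission queue backlog (instance {{ $labels.instance }})"
```
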
@@ -69,6 +69,23 @@ Show volume usage (%)
100.0 - 100 * (windows_logical_disk_free_bytes{instance="localhost", volume="C:"} / windows_logical_disk_size_bytes{instance="localhost", volume="C:"})
```

Disk activity (fraction of time the disk was busy)
```promql
(
  rate(windows_logical_disk_read_seconds_total[2m])
  +
  rate(windows_logical_disk_write_seconds_total[2m])
)
/
(
  rate(windows_logical_disk_read_seconds_total[2m])
  +
  rate(windows_logical_disk_write_seconds_total[2m])
  +
  rate(windows_logical_disk_idle_seconds_total[2m])
)
```

## Alerting examples
**prometheus.rules**
```yaml
@@ -1,78 +0,0 @@
# logon collector

The logon collector exposes metrics detailing the active user logon sessions.

| | |
|---------------------|-----------|
| Metric name prefix | `logon` |
| Source | Win32 API |
| Enabled by default? | No |

## Flags

None

## Metrics

| Name | Description | Type | Labels |
|--------------------------------------------------|--------------------------------------------|-------|------------------------------------|
| `windows_logon_session_logon_timestamp_seconds` | timestamp of the logon session in seconds. | gauge | `domain`, `id`, `type`, `username` |

### Example metric
Example output listing the active logon sessions:
```
# HELP windows_logon_session_logon_timestamp_seconds timestamp of the logon session in seconds.
# TYPE windows_logon_session_logon_timestamp_seconds gauge
windows_logon_session_logon_timestamp_seconds{domain="",id="0x0:0x8c54",type="System",username=""} 1.72876928e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x991a",type="Interactive",username="UMFD-1"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x9933",type="Interactive",username="UMFD-0"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x994a",type="Interactive",username="UMFD-0"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x999d",type="Interactive",username="UMFD-1"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf25a",type="Interactive",username="UMFD-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf290",type="Interactive",username="UMFD-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x130241",type="Network",username="vm-jok-dev$"} 1.728769625e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x24f7c9",type="Network",username="vm-jok-dev$"} 1.728770121e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x276846",type="Network",username="vm-jok-dev$"} 1.728770195e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e4",type="Service",username="vm-jok-dev$"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e7",type="System",username="vm-jok-dev$"} 1.728769279e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x71d0f",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x720a3",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x725cb",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x753d8",type="Network",username="vm-jok-dev$"} 1.728769325e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xa3913",type="Network",username="vm-jok-dev$"} 1.728769385e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xbe7f2",type="Network",username="jok"} 1.728769531e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xc76c4",type="RemoteInteractive",username="jok"} 1.728769533e+09
windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e3",type="Service",username="IUSR"} 1.728769295e+09
windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e5",type="Service",username="LOCAL SERVICE"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xae4c7",type="Service",username="MSSQLSERVER"} 1.728769425e+09
windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xb42f1",type="Service",username="SQLTELEMETRY"} 1.728769431e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfbac",type="Interactive",username="DWM-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfc72",type="Interactive",username="DWM-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdedd",type="Interactive",username="DWM-1"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdefd",type="Interactive",username="DWM-1"} 1.728769283e+09
```

### Possible values for `type`

- System
- Interactive
- Network
- Batch
- Service
- Proxy
- Unlock
- NetworkCleartext
- NewCredentials
- RemoteInteractive
- CachedInteractive
- CachedRemoteInteractive
- CachedUnlock

## Useful queries
Query the total number of local and remote (i.e. Terminal Services) interactive sessions.
```
count(windows_logon_session_logon_timestamp_seconds{type=~"Interactive|RemoteInteractive"}) by (type)
```
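
Since the metric value is a Unix timestamp, the age of each session can also be derived; a minimal sketch using only the metric documented above:

```
# Seconds since each logon session was established
time() - windows_logon_session_logon_timestamp_seconds
```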

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

@@ -19,6 +19,10 @@ If given, an interface name needs to match the include regexp in order for the c

If given, an interface name needs to *not* match the exclude regexp in order for the corresponding metrics to be reported

### `--collector.net.enabled`

Comma-separated list of collectors to use. Defaults to all, if not specified.

## Metrics

Name | Description | Type | Labels

@@ -10,7 +10,9 @@ The netframework collector exposes metrics about the .NET Framework.

## Flags

None
### `--collector.netframework.enabled`

Comma-separated list of collectors to use. Defaults to all, if not specified.

## Metrics

@@ -21,19 +21,21 @@ If given, a disk needs to *not* match the exclude regexp in order for the corres

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`requests_queued` | Number of requests outstanding on the disk at the time the performance data is collected | gauge | `disk`
`read_bytes_total` | Rate at which bytes are transferred from the disk during read operations | counter | `disk`
`reads_total` | Rate of read operations on the disk | counter | `disk`
`write_bytes_total` | Rate at which bytes are transferred to the disk during write operations | counter | `disk`
`writes_total` | Rate of write operations on the disk | counter | `disk`
`read_seconds_total` | Seconds the disk was busy servicing read requests | counter | `disk`
`write_seconds_total` | Seconds the disk was busy servicing write requests | counter | `disk`
`free_bytes` | Unused space of the disk in bytes (not real time, updates every 10-15 min) | gauge | `disk`
`size_bytes` | Total size of the disk in bytes (not real time, updates every 10-15 min) | gauge | `disk`
`idle_seconds_total` | Seconds the disk was idle (not servicing read/write requests) | counter | `disk`
`split_ios_total` | Number of I/Os to the disk split into multiple I/Os | counter | `disk`
| Name | Description | Type | Labels |
|--------------------------------------------------------|---------------------------------------------------------------------------------------------------------|---------|--------|
| windows_physical_disk_requests_queued | The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength) | Gauge | disk |
| windows_physical_disk_read_bytes_total | The number of bytes transferred from the disk during read operations (PhysicalDisk.DiskReadBytesPerSec) | Counter | disk |
| windows_physical_disk_reads_total | The number of read operations on the disk (PhysicalDisk.DiskReadsPerSec) | Counter | disk |
| windows_physical_disk_write_bytes_total | The number of bytes transferred to the disk during write operations (PhysicalDisk.DiskWriteBytesPerSec) | Counter | disk |
| windows_physical_disk_writes_total | The number of write operations on the disk (PhysicalDisk.DiskWritesPerSec) | Counter | disk |
| windows_physical_disk_read_seconds_total | Seconds that the disk was busy servicing read requests (PhysicalDisk.PercentDiskReadTime) | Counter | disk |
| windows_physical_disk_write_seconds_total | Seconds that the disk was busy servicing write requests (PhysicalDisk.PercentDiskWriteTime) | Counter | disk |
| windows_physical_disk_idle_seconds_total | Seconds that the disk was idle (PhysicalDisk.PercentIdleTime) | Counter | disk |
| windows_physical_disk_split_ios_total | The number of I/Os to the disk that were split into multiple I/Os (PhysicalDisk.SplitIOPerSec) | Counter | disk |
| windows_physical_disk_read_latency_seconds_total | The average time, in seconds, of a read operation from the disk (PhysicalDisk.AvgDiskSecPerRead) | Counter | disk |
| windows_physical_disk_write_latency_seconds_total | The average time, in seconds, of a write operation to the disk (PhysicalDisk.AvgDiskSecPerWrite) | Counter | disk |
| windows_physical_disk_read_write_latency_seconds_total | The time, in seconds, of the average disk transfer (PhysicalDisk.AvgDiskSecPerTransfer) | Counter | disk |

### Warning about size metrics
The `free_bytes` and `size_bytes` metrics are not updated in real time and might have a delay of 10-15min.
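
The latency counters in the table accumulate seconds, so a per-operation average can be derived by dividing rates; a sketch, with the disk selector as an assumption:

```
# Average read latency per operation over the last two minutes
rate(windows_physical_disk_read_latency_seconds_total{disk="0"}[2m])
/
rate(windows_physical_disk_reads_total{disk="0"}[2m])
```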

@@ -52,29 +54,4 @@ rate(windows_physical_disk_reads_total{instance="localhost", disk=~"0"}[2m]) + r
```

## Alerting examples
**prometheus.rules**
```yaml
groups:
  - name: Windows Disk Alerts
    rules:

      # Sends an alert when disk space usage is above 95%
      - alert: DiskSpaceUsage
        expr: 100.0 - 100 * (windows_physical_disk_free_bytes / windows_physical_disk_size_bytes) > 95
        for: 10m
        labels:
          severity: high
        annotations:
          summary: "Disk Space Usage (instance {{ $labels.instance }})"
          description: "Disk Space on Drive is used more than 95%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"

      # Alerts on disks with over 85% space usage predicted to fill within the next four days
      - alert: DiskFilling
        expr: 100 * (windows_physical_disk_free_bytes / windows_physical_disk_size_bytes) < 15 and predict_linear(windows_physical_disk_free_bytes[6h], 4 * 24 * 3600) < 0
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: "Disk full in four days (instance {{ $labels.instance }})"
          description: "{{ $labels.disk }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
```
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

@@ -16,7 +16,7 @@ None

| Name | Description | Type | Labels |
|----------------------------------------------|-----------------------------------------------------------------------------------|---------|--------|
| `windows_system_boot_time_timestamp_seconds` | Unix timestamp of last system boot | gauge | None |
| `windows_system_boot_time_timestamp` | Unix timestamp of last system boot | gauge | None |
| `windows_system_context_switches_total` | Total number of [context switches](https://en.wikipedia.org/wiki/Context_switch) | counter | None |
| `windows_system_exception_dispatches_total` | Total exceptions dispatched by the system | counter | None |
| `windows_system_processes` | Number of process contexts currently loaded or running on the operating system | gauge | None |
@@ -41,7 +41,7 @@ windows_system_processes{instance="localhost"}
## Useful queries
Find hosts that have rebooted in the last 24 hours
```
time() - windows_system_boot_time_timestamp_seconds < 86400
time() - windows_system_boot_time_timestamp < 86400
```

## Alerting examples
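
A hedged sketch of a rule built on the reboot query above (this diff renames the metric, so the exposed name depends on the exporter version; the 15-minute window is an arbitrary example):

```yaml
groups:
  - name: Windows System Alerts
    rules:
      # Fires when a host has rebooted within the last 15 minutes.
      - alert: HostRecentlyRebooted
        expr: time() - windows_system_boot_time_timestamp < 900
        labels:
          severity: info
        annotations:
          summary: "Host rebooted (instance {{ $labels.instance }})"
```
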
@@ -21,16 +21,17 @@ Matching is case-sensitive.

## Metrics

| Name | Description | Type | Labels |
|-----------------------------------------------------|-------------|---------|------------|
| `windows_time_clock_frequency_adjustment_ppb_total` | Total adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | counter | None |
| `windows_time_computed_time_offset_seconds` | The absolute time offset between the system clock and the chosen time source, as computed by the W32Time service in microseconds. When a new valid sample is available, the computed time is updated with the time offset indicated by the sample. This time is the actual time offset of the local clock. W32Time initiates clock correction by using this offset and updates the computed time in between samples with the remaining time offset that needs to be applied to the local clock. Clock accuracy can be tracked by using this performance counter with a low polling interval (for example, 256 seconds or less) and looking for the counter value to be smaller than the desired clock accuracy limit. | gauge | None |
| `windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. | gauge | None |
| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None |
| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None |
| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None |
| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None |
| `windows_time_timezone` | Current timezone as reported by the operating system. | gauge | `timezone` |
| Name | Description | Type | Labels |
|----------------------------------------------------|-------------|---------|------------|
| `windows_time_clock_frequency_adjustment` | Adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | gauge | None |
| `windows_time_clock_frequency_adjustment_ppb` | Adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | gauge | None |
| `windows_time_computed_time_offset_seconds` | The absolute time offset between the system clock and the chosen time source, as computed by the W32Time service in microseconds. When a new valid sample is available, the computed time is updated with the time offset indicated by the sample. This time is the actual time offset of the local clock. W32Time initiates clock correction by using this offset and updates the computed time in between samples with the remaining time offset that needs to be applied to the local clock. Clock accuracy can be tracked by using this performance counter with a low polling interval (for example, 256 seconds or less) and looking for the counter value to be smaller than the desired clock accuracy limit. | gauge | None |
| `windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. | gauge | None |
| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None |
| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None |
| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None |
| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None |
| `windows_time_timezone` | Current timezone as reported by the operating system. | gauge | `timezone` |
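
The `computed_time_offset` description above suggests comparing the counter against a desired accuracy limit; a minimal PromQL sketch, assuming the exported value is in seconds as the metric name indicates (the 128 ms threshold is an arbitrary example):

```
abs(windows_time_computed_time_offset_seconds) > 0.128
```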

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

35
go.mod
@@ -1,20 +1,21 @@
module github.com/prometheus-community/windows_exporter

go 1.23
go 1.23.0

toolchain go1.23.4

require (
	github.com/Microsoft/hcsshim v0.12.9
	github.com/alecthomas/kingpin/v2 v2.4.0
	github.com/bmatcuk/doublestar/v4 v4.7.1
	github.com/bmatcuk/doublestar/v4 v4.9.0
	github.com/dimchansky/utfbom v1.1.1
	github.com/go-ole/go-ole v1.3.0
	github.com/google/uuid v1.6.0
	github.com/prometheus/client_golang v1.20.5
	github.com/prometheus/client_golang v1.21.1
	github.com/prometheus/client_model v0.6.1
	github.com/prometheus/common v0.61.0
	github.com/prometheus/exporter-toolkit v0.13.2
	github.com/prometheus/common v0.62.0
	github.com/prometheus/exporter-toolkit v0.14.0
	github.com/stretchr/testify v1.10.0
	golang.org/x/sys v0.29.0
	golang.org/x/sys v0.31.0
	gopkg.in/yaml.v3 v3.0.1
)

@@ -23,7 +24,7 @@ require (
	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/containerd/cgroups/v3 v3.0.4 // indirect
	github.com/containerd/cgroups/v3 v3.0.5 // indirect
	github.com/containerd/errdefs v1.0.0 // indirect
	github.com/containerd/errdefs/pkg v0.3.0 // indirect
	github.com/containerd/typeurl/v2 v2.2.3 // indirect
@@ -32,7 +33,7 @@ require (
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
	github.com/jpillora/backoff v1.0.0 // indirect
	github.com/klauspost/compress v1.17.11 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/mdlayher/socket v0.5.1 // indirect
	github.com/mdlayher/vsock v1.2.1 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -43,13 +44,13 @@ require (
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
	go.opencensus.io v0.24.0 // indirect
	golang.org/x/crypto v0.31.0 // indirect
	golang.org/x/net v0.32.0 // indirect
	golang.org/x/oauth2 v0.24.0 // indirect
	golang.org/x/sync v0.10.0 // indirect
	golang.org/x/text v0.21.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect
	google.golang.org/grpc v1.68.0 // indirect
	google.golang.org/protobuf v1.35.2 // indirect
	golang.org/x/crypto v0.36.0 // indirect
	golang.org/x/net v0.37.0 // indirect
	golang.org/x/oauth2 v0.28.0 // indirect
	golang.org/x/sync v0.12.0 // indirect
	golang.org/x/text v0.23.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
	google.golang.org/grpc v1.71.0 // indirect
	google.golang.org/protobuf v1.36.5 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
)

62
go.sum
@@ -10,15 +10,15 @@ github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vS
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q=
github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.9.0 h1:DBvuZxjdKkRP/dr4GVV4w2fnmrk5Hxc90T51LZjv0JA=
github.com/bmatcuk/doublestar/v4 v4.9.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4=
github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@@ -66,14 +66,12 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -92,15 +90,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ=
github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -128,8 +126,8 @@ go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -145,30 +143,30 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -186,15 +184,15 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 h1:LWZqQOEjDyONlF1H6afSWpAL/znlREo2tHfLoe+8LMA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -204,8 +202,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

@@ -28,7 +28,7 @@ Copy-Item -Force $PathToExecutable Work/windows_exporter.exe
Write-Verbose "Creating windows_exporter-${Version}-${Arch}.msi"
$wixArch = @{"amd64" = "x64"; "arm64" = "arm64"}[$Arch]

Invoke-Expression "wix build -arch $wixArch -o .\windows_exporter-$($Version)-$($Arch).msi .\files.wxs .\main.wxs -d ProductName=windows_exporter -d Version=$($MsiVersion) -ext WixToolset.Firewall.wixext -ext WixToolset.UI.wixext -ext WixToolset.Util.wixext"
Invoke-Expression "wix build -sw1149 -arch $wixArch -o .\windows_exporter-$($Version)-$($Arch).msi .\files.wxs .\main.wxs -d ProductName=windows_exporter -d Version=$($MsiVersion) -ext WixToolset.Firewall.wixext -ext WixToolset.UI.wixext -ext WixToolset.Util.wixext"

Write-Verbose "Done!"
Pop-Location
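
# Hypothetical install of the resulting package (file name and values are
# examples, not part of this change). The MSI accepts CONFIG_FILE and
# EXTRA_FLAGS properties, and main.wxs rejects EXTRA_FLAGS that smuggle in
# a --config.file flag:
#   msiexec /i .\windows_exporter-0.30.0-amd64.msi CONFIG_FILE="C:\custom\config.yaml" EXTRA_FLAGS="--log.level=debug"
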
BIN
installer/codesign.cer
Normal file
Binary file not shown.
@@ -44,6 +44,12 @@
        <ServiceDependency Id="wmiApSrv" />
      </ServiceInstall>
      <ServiceControl Id="ServiceStateControl" Name="windows_exporter" Remove="uninstall" Start="install" Stop="both"/>
      <!-- The "Name" field must match the argument to eventlog.Open() -->
      <util:EventSource Log="Application" Name="windows_exporter"
                        EventMessageFile="%SystemRoot%\System32\EventCreate.exe"
                        SupportsErrors="yes"
                        SupportsInformationals="yes"
                        SupportsWarnings="yes"/>
    </Component>
    <Component Id="CreateTextfileDirectory" Directory="textfile_inputs" Guid="d03ef58a-9cbf-4165-ad39-d143e9b27e14">
      <CreateFolder />

@@ -43,6 +43,9 @@
        Property="OLDERVERSIONBEINGUPGRADED" />
    </Upgrade>

    <CustomAction Id="CheckExtraFlags"
      Error="The parameter '--config.file' must not be included in EXTRA_FLAGS. Use CONFIG_FILE instead. Please remove it and try again." />

    <CustomAction Id="set_maintenance" Property="MAINTENANCE" Value="true" />

    <!-- Set to reinstall all features. -->
@@ -54,12 +57,12 @@
    <CustomAction Id="set_reinstallmode_property"
      Property="REINSTALLMODE"
      Value="amus" />
    <!-- START CUSTOM ACTION FOR CONFIG FILE CREATION -->
    <SetProperty
      Id="CreateConfigFile"
      Value="&quot;[%ComSpec]&quot; /c TYPE NUL >>&quot;[ConfigFile_NonDefault][ConfigFile_Default]&quot;"
      Before="CreateConfigFile"
      Sequence="execute"
      Condition="ConfigFile_NonDefault OR ConfigFile_Default"
    />
    <CustomAction
      Id="CreateConfigFile"
@@ -69,6 +72,24 @@
      Return="check"
      Impersonate="no"
    />
    <!-- END CUSTOM ACTION FOR CONFIG FILE CREATION -->

    <!-- START CUSTOM ACTION FOR KILLING THE PROCESS -->
    <SetProperty
      Id="KillProcess"
      Value="&quot;[WindowsFolder]\System32\taskkill.exe&quot; /T /F /IM windows_exporter.exe"
      Before="KillProcess"
      Sequence="execute"
    />
    <CustomAction
      Id="KillProcess"
      BinaryRef="Wix4UtilCA_$(sys.BUILDARCHSHORT)"
      DllEntry="WixQuietExec"
      Execute="deferred"
      Return="ignore"
      Impersonate="no"
    />
    <!-- END CUSTOM ACTION FOR KILLING THE PROCESS -->

    <InstallExecuteSequence>
      <!-- Set REINSTALL=all and REINSTALLMODE=amus if the user reruns the
@@ -78,6 +99,10 @@
      <Custom Action="set_reinstall_all_property" Before="set_reinstallmode_property" Condition="MAINTENANCE"/>
      <Custom Action="set_reinstallmode_property" Before="LaunchConditions" Condition="MAINTENANCE"/>
      <Custom Action="CreateConfigFile" Before="InstallServices" Condition="ConfigFile_NonDefault OR ConfigFile_Default" />
      <Custom Action="KillProcess" Before="RemoveFiles" />

      <Custom Action="CheckExtraFlags" Before="InstallInitialize"
        Condition="EXTRA_FLAGS AND (EXTRA_FLAGS><&quot;--config.file&quot;)" />
    </InstallExecuteSequence>

    <Media Id="1" Cabinet="windows_exporter.cab" EmbedCab="yes" />
@@ -90,9 +115,10 @@
    <SetProperty Id="ExtraFlags" After="InstallFiles" Sequence="execute" Value="[EXTRA_FLAGS]" Condition="EXTRA_FLAGS" />

    <Property Id="CONFIG_FILE" Secure="yes" Value="config.yaml" />
    <SetProperty Id="ConfigFile_NonDefault" After="InstallFiles" Sequence="execute" Value="[CONFIG_FILE]" Condition="CONFIG_FILE AND CONFIG_FILE<>&quot;config.yaml&quot;" />
    <SetProperty Id="ConfigFile_Remote" After="InstallFiles" Sequence="execute" Value="[CONFIG_FILE]" Condition="CONFIG_FILE AND (CONFIG_FILE<<&quot;http://&quot; OR CONFIG_FILE<<&quot;https://&quot;)" />
    <SetProperty Id="ConfigFile_NonDefault" After="InstallFiles" Sequence="execute" Value="[CONFIG_FILE]" Condition="CONFIG_FILE AND CONFIG_FILE<>&quot;config.yaml&quot; AND NOT (CONFIG_FILE<<&quot;http://&quot; OR CONFIG_FILE<<&quot;https://&quot;)" />
    <SetProperty Id="ConfigFile_Default" After="InstallFiles" Sequence="execute" Value="[APPLICATIONFOLDER]config.yaml" Condition="CONFIG_FILE=&quot;config.yaml&quot;" />
    <SetProperty Id="ConfigFileFlag" After="InstallFiles" Sequence="execute" Value="--config.file=&quot;[ConfigFile_NonDefault][ConfigFile_Default]&quot;" Condition="ConfigFile_NonDefault OR ConfigFile_Default" />
    <SetProperty Id="ConfigFileFlag" After="InstallFiles" Sequence="execute" Value="--config.file=&quot;[ConfigFile_Remote][ConfigFile_NonDefault][ConfigFile_Default]&quot;" Condition="ConfigFile_Remote OR ConfigFile_NonDefault OR ConfigFile_Default" />

    <Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
    <SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--web.listen-address [LISTEN_ADDR]:[LISTEN_PORT]" Condition="LISTEN_ADDR<>&quot;&quot; OR LISTEN_PORT<>9182" />

@@ -130,13 +130,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
	}

	c.addressBookOperationsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"),
		"",
@@ -511,6 +504,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
	}

	return nil
}

@@ -520,6 +520,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	err := c.perfDataCollector.Collect(&c.perfDataObject)
	if err != nil {
		return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", err)
	} else if len(c.perfDataObject) == 0 {
		return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", types.ErrNoDataUnexpected)
	}

	ch <- prometheus.MustNewConstMetric(

@@ -82,13 +82,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Certification Authority collector: %w", err)
	}

	c.requestsPerSecond = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
		"Total certificate requests processed",
@@ -168,6 +161,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Certification Authority collector: %w", err)
	}

	return nil
}

@@ -112,13 +112,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil)
	if err != nil {
		return fmt.Errorf("failed to create AD FS collector: %w", err)
	}

	c.adLoginConnectionFailures = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
		"Total number of connection failures to an Active Directory domain controller",
@@ -378,6 +371,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil)
	if err != nil {
		return fmt.Errorf("failed to create AD FS collector: %w", err)
	}

	return nil
}

@@ -385,6 +385,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	err := c.perfDataCollector.Collect(&c.perfDataObject)
	if err != nil {
		return fmt.Errorf("failed to collect ADFS metrics: %w", err)
	} else if len(c.perfDataObject) == 0 {
		return fmt.Errorf("failed to collect ADFS metrics: %w", types.ErrNoDataUnexpected)
	}

	ch <- prometheus.MustNewConstMetric(

16
internal/collector/cache/cache.go
vendored
@@ -98,13 +98,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Cache collector: %w", err)
	}

	c.asyncCopyReadsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
		"(AsyncCopyReadsTotal)",
@@ -280,6 +273,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Cache collector: %w", err)
	}

	return nil
}

@@ -288,6 +288,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	err := c.perfDataCollector.Collect(&c.perfDataObject)
	if err != nil {
		return fmt.Errorf("failed to collect Cache metrics: %w", err)
	} else if len(c.perfDataObject) == 0 {
		return fmt.Errorf("failed to collect Cache metrics: %w", types.ErrNoDataUnexpected)
	}

	ch <- prometheus.MustNewConstMetric(

@@ -89,15 +89,8 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	c.mu = sync.Mutex{}

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Processor Information collector: %w", err)
	}

	c.logicalProcessors = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "logical_processor"),
		"Total number of logical processors",
@@ -186,6 +179,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	c.processorRTCValues = map[string]utils.Counter{}
	c.processorMPerfValues = map[string]utils.Counter{}

	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Processor Information collector: %w", err)
	}

	return nil
}

@@ -75,18 +75,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
	if miSession == nil {
		return errors.New("miSession is nil")
	}

	miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor")
	if err != nil {
		return fmt.Errorf("failed to create WMI query: %w", err)
	}

	c.miQuery = miQuery
	c.miSession = miSession

	c.cpuInfo = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, "", Name),
		"Labelled CPU information as provided by Win32_Processor",
@@ -148,6 +136,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
		nil,
	)

	if miSession == nil {
		return errors.New("miSession is nil")
	}

	miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor")
	if err != nil {
		return fmt.Errorf("failed to create WMI query: %w", err)
	}

	c.miQuery = miQuery
	c.miSession = miSession

	var dst []miProcessor
	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
		return fmt.Errorf("WMI query failed: %w", err)

@@ -37,7 +37,7 @@ type Collector struct {
	config Config

	// physicalMemoryBytes
	// Deprecated: Use windows_physical_memory_total_bytes instead
	// Deprecated: Use windows_memory_physical_total_bytes instead
	physicalMemoryBytes *prometheus.Desc
	// logicalProcessors
	// Deprecated: Use windows_cpu_logical_processor instead
@@ -85,7 +85,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
	)
	c.physicalMemoryBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "physical_memory_bytes"),
		"Deprecated: Use windows_physical_memory_total_bytes instead",
		"Deprecated: Use windows_memory_physical_total_bytes instead",
		nil,
		nil,
	)

@@ -160,29 +160,6 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {

	logger.Info("dfsr collector is in an experimental state! Metrics for this collector have not been tested.")

	var err error

	if slices.Contains(c.config.CollectorsEnabled, "connection") {
		c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
		if err != nil {
			return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
		}
	}

	if slices.Contains(c.config.CollectorsEnabled, "folder") {
		c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
		if err != nil {
			return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
		}
	}

	if slices.Contains(c.config.CollectorsEnabled, "volume") {
		c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
		if err != nil {
			return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
		}
	}

	// connection
	c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
@@ -473,13 +450,36 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
		nil,
	)

	var err error

	if slices.Contains(c.config.CollectorsEnabled, "connection") {
		c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
		if err != nil {
			return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
		}
	}

	if slices.Contains(c.config.CollectorsEnabled, "folder") {
		c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
		if err != nil {
			return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
		}
	}

	if slices.Contains(c.config.CollectorsEnabled, "volume") {
		c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
		if err != nil {
			return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
		}
	}

	return nil
}

// Collect implements the Collector interface.
// Sends metric values for each metric to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	errs := make([]error, 0, 3)
	errs := make([]error, 0)

	if slices.Contains(c.config.CollectorsEnabled, "connection") {
		errs = append(errs, c.collectPDHConnection(ch))

@@ -16,22 +16,39 @@
|
||||
package dhcp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/internal/headers/dhcpsapi"
|
||||
"github.com/prometheus-community/windows_exporter/internal/mi"
|
||||
"github.com/prometheus-community/windows_exporter/internal/pdh"
|
||||
"github.com/prometheus-community/windows_exporter/internal/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const Name = "dhcp"
|
||||
const (
|
||||
Name = "dhcp"
|
||||
|
||||
type Config struct{}
|
||||
subCollectorServerMetrics = "server_metrics"
|
||||
subCollectorScopeMetrics = "scope_metrics"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
CollectorsEnabled []string `yaml:"collectors_enabled"`
|
||||
}
|
||||
|
||||
//nolint:gochecknoglobals
|
||||
var ConfigDefaults = Config{}
|
||||
var ConfigDefaults = Config{
|
||||
CollectorsEnabled: []string{
|
||||
subCollectorServerMetrics,
|
||||
subCollectorScopeMetrics,
|
||||
},
|
||||
}
|
||||
|
||||
// A Collector is a Prometheus Collector perflib DHCP metrics.
|
||||
type Collector struct {
|
||||
@@ -65,6 +82,17 @@ type Collector struct {
|
||||
packetsReceivedTotal *prometheus.Desc
|
||||
releasesTotal *prometheus.Desc
|
||||
requestsTotal *prometheus.Desc
|
||||
|
||||
scopeInfo *prometheus.Desc
|
||||
scopeState *prometheus.Desc
|
||||
scopeAddressesFreeTotal *prometheus.Desc
|
||||
scopeAddressesFreeOnPartnerServerTotal *prometheus.Desc
|
||||
scopeAddressesFreeOnThisServerTotal *prometheus.Desc
|
||||
scopeAddressesInUseTotal *prometheus.Desc
|
||||
scopeAddressesInUseOnPartnerServerTotal *prometheus.Desc
|
||||
scopeAddressesInUseOnThisServerTotal *prometheus.Desc
|
||||
scopePendingOffersTotal *prometheus.Desc
|
||||
scopeReservedAddressTotal *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
@@ -72,6 +100,10 @@ func New(config *Config) *Collector {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
if config.CollectorsEnabled == nil {
|
||||
config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
@@ -79,8 +111,26 @@ func New(config *Config) *Collector {
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
func NewWithFlags(app *kingpin.Application) *Collector {
|
||||
c := &Collector{
|
||||
config: ConfigDefaults,
|
||||
}
|
||||
c.config.CollectorsEnabled = make([]string, 0)
|
||||
|
||||
var collectorsEnabled string
|
||||
|
||||
app.Flag(
|
||||
"collector.dhcp.enabled",
|
||||
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
|
||||
).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
|
||||
|
||||
app.Action(func(*kingpin.ParseContext) error {
|
||||
c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
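Note: the NewWithFlags hunk above follows a recurring pattern in this changeset (dhcp, dns): a single comma-separated kingpin flag is split into the sub-collector list after parsing. The following standalone sketch, not part of the diff, reproduces that pattern with a hypothetical "example" collector name so it can be compiled and run on its own.

package main

import (
    "fmt"
    "os"
    "strings"

    "github.com/alecthomas/kingpin/v2"
)

func main() {
    app := kingpin.New("example", "sketch of the comma-separated sub-collector flag")

    defaults := []string{"server_metrics", "scope_metrics"}

    var raw string
    app.Flag("collector.example.enabled", "Comma-separated list of collectors to use.").
        Default(strings.Join(defaults, ",")).
        StringVar(&raw)

    var enabled []string
    // Action callbacks run after parsing, when the flag value is final.
    app.Action(func(*kingpin.ParseContext) error {
        enabled = strings.Split(raw, ",")
        return nil
    })

    kingpin.MustParse(app.Parse(os.Args[1:]))
    fmt.Println(enabled) // e.g. [server_metrics scope_metrics]
}

Running the sketch with --collector.example.enabled=server_metrics would yield a one-element list, which is then consulted with slices.Contains, as the diff does.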
@@ -88,7 +138,9 @@ func (c *Collector) GetName() string {
}

func (c *Collector) Close() error {
    c.perfDataCollector.Close()
    if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) {
        c.perfDataCollector.Close()
    }

    return nil
}

@@ -96,169 +148,263 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil)
    if err != nil {
        return fmt.Errorf("failed to create DHCP Server collector: %w", err)
    if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) {
        c.scopeInfo = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_info"),
            "DHCP Scope information",
            []string{"name", "superscope_name", "superscope_id", "scope"},
            nil,
        )

        c.scopeState = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_state"),
            "DHCP Scope state",
            []string{"scope", "state"},
            nil,
        )

        c.scopeAddressesFreeTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free"),
            "DHCP Scope free addresses",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesFreeOnPartnerServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_partner_server"),
            "DHCP Scope free addresses on partner server",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesFreeOnThisServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_this_server"),
            "DHCP Scope free addresses on this server",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesInUseTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use"),
            "DHCP Scope addresses in use",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesInUseOnPartnerServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_partner_server"),
            "DHCP Scope addresses in use on partner server",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesInUseOnThisServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_this_server"),
            "DHCP Scope addresses in use on this server",
            []string{"scope"},
            nil,
        )

        c.scopePendingOffersTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_pending_offers"),
            "DHCP Scope pending offers",
            []string{"scope"},
            nil,
        )

        c.scopeReservedAddressTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_reserved_address"),
            "DHCP Scope reserved addresses",
            []string{"scope"},
            nil,
        )
    }

    c.packetsReceivedTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
        "Total number of packets received by the DHCP server (PacketsReceivedTotal)",
        nil,
        nil,
    )
    c.duplicatesDroppedTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "duplicates_dropped_total"),
        "Total number of duplicate packets received by the DHCP server (DuplicatesDroppedTotal)",
        nil,
        nil,
    )
    c.packetsExpiredTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "packets_expired_total"),
        "Total number of packets expired in the DHCP server message queue (PacketsExpiredTotal)",
        nil,
        nil,
    )
    c.activeQueueLength = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "active_queue_length"),
        "Number of packets in the processing queue of the DHCP server (ActiveQueueLength)",
        nil,
        nil,
    )
    c.conflictCheckQueueLength = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "conflict_check_queue_length"),
        "Number of packets in the DHCP server queue waiting on conflict detection (ping). (ConflictCheckQueueLength)",
        nil,
        nil,
    )
    c.discoversTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "discovers_total"),
        "Total DHCP Discovers received by the DHCP server (DiscoversTotal)",
        nil,
        nil,
    )
    c.offersTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "offers_total"),
        "Total DHCP Offers sent by the DHCP server (OffersTotal)",
        nil,
        nil,
    )
    c.requestsTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
        "Total DHCP Requests received by the DHCP server (RequestsTotal)",
        nil,
        nil,
    )
    c.informsTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "informs_total"),
        "Total DHCP Informs received by the DHCP server (InformsTotal)",
        nil,
        nil,
    )
    c.acksTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "acks_total"),
        "Total DHCP Acks sent by the DHCP server (AcksTotal)",
        nil,
        nil,
    )
    c.nACKsTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "nacks_total"),
        "Total DHCP Nacks sent by the DHCP server (NacksTotal)",
        nil,
        nil,
    )
    c.declinesTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "declines_total"),
        "Total DHCP Declines received by the DHCP server (DeclinesTotal)",
        nil,
        nil,
    )
    c.releasesTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "releases_total"),
        "Total DHCP Releases received by the DHCP server (ReleasesTotal)",
        nil,
        nil,
    )
    c.offerQueueLength = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "offer_queue_length"),
        "Number of packets in the offer queue of the DHCP server (OfferQueueLength)",
        nil,
        nil,
    )
    c.deniedDueToMatch = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "denied_due_to_match_total"),
        "Total number of DHCP requests denied, based on matches from the Deny list (DeniedDueToMatch)",
        nil,
        nil,
    )
    c.deniedDueToNonMatch = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "denied_due_to_nonmatch_total"),
        "Total number of DHCP requests denied, based on non-matches from the Allow list (DeniedDueToNonMatch)",
        nil,
        nil,
    )
    c.failoverBndUpdSentTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_sent_total"),
        "Number of DHCP fail over Binding Update messages sent (FailoverBndupdSentTotal)",
        nil,
        nil,
    )
    c.failoverBndUpdReceivedTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_received_total"),
        "Number of DHCP fail over Binding Update messages received (FailoverBndupdReceivedTotal)",
        nil,
        nil,
    )
    c.failoverBndAckSentTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_sent_total"),
        "Number of DHCP fail over Binding Ack messages sent (FailoverBndackSentTotal)",
        nil,
        nil,
    )
    c.failoverBndAckReceivedTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_received_total"),
        "Number of DHCP fail over Binding Ack messages received (FailoverBndackReceivedTotal)",
        nil,
        nil,
    )
    c.failoverBndUpdPendingOutboundQueue = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_pending_in_outbound_queue"),
        "Number of pending outbound DHCP fail over Binding Update messages (FailoverBndupdPendingOutboundQueue)",
        nil,
        nil,
    )
    c.failoverTransitionsCommunicationInterruptedState = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_communicationinterrupted_state_total"),
        "Total number of transitions into COMMUNICATION INTERRUPTED state (FailoverTransitionsCommunicationinterruptedState)",
        nil,
        nil,
    )
    c.failoverTransitionsPartnerDownState = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_partnerdown_state_total"),
        "Total number of transitions into PARTNER DOWN state (FailoverTransitionsPartnerdownState)",
        nil,
        nil,
    )
    c.failoverTransitionsRecoverState = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_recover_total"),
        "Total number of transitions into RECOVER state (FailoverTransitionsRecoverState)",
        nil,
        nil,
    )
    c.failoverBndUpdDropped = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_dropped_total"),
        "Total number of DHCP fail over Binding Updates dropped (FailoverBndupdDropped)",
        nil,
        nil,
    )
    if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) {
        c.packetsReceivedTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
            "Total number of packets received by the DHCP server (PacketsReceivedTotal)",
            nil,
            nil,
        )
        c.duplicatesDroppedTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "duplicates_dropped_total"),
            "Total number of duplicate packets received by the DHCP server (DuplicatesDroppedTotal)",
            nil,
            nil,
        )
        c.packetsExpiredTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "packets_expired_total"),
            "Total number of packets expired in the DHCP server message queue (PacketsExpiredTotal)",
            nil,
            nil,
        )
        c.activeQueueLength = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "active_queue_length"),
            "Number of packets in the processing queue of the DHCP server (ActiveQueueLength)",
            nil,
            nil,
        )
        c.conflictCheckQueueLength = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "conflict_check_queue_length"),
            "Number of packets in the DHCP server queue waiting on conflict detection (ping). (ConflictCheckQueueLength)",
            nil,
            nil,
        )
        c.discoversTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "discovers_total"),
            "Total DHCP Discovers received by the DHCP server (DiscoversTotal)",
            nil,
            nil,
        )
        c.offersTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "offers_total"),
            "Total DHCP Offers sent by the DHCP server (OffersTotal)",
            nil,
            nil,
        )
        c.requestsTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
            "Total DHCP Requests received by the DHCP server (RequestsTotal)",
            nil,
            nil,
        )
        c.informsTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "informs_total"),
            "Total DHCP Informs received by the DHCP server (InformsTotal)",
            nil,
            nil,
        )
        c.acksTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "acks_total"),
            "Total DHCP Acks sent by the DHCP server (AcksTotal)",
            nil,
            nil,
        )
        c.nACKsTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "nacks_total"),
            "Total DHCP Nacks sent by the DHCP server (NacksTotal)",
            nil,
            nil,
        )
        c.declinesTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "declines_total"),
            "Total DHCP Declines received by the DHCP server (DeclinesTotal)",
            nil,
            nil,
        )
        c.releasesTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "releases_total"),
            "Total DHCP Releases received by the DHCP server (ReleasesTotal)",
            nil,
            nil,
        )
        c.offerQueueLength = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "offer_queue_length"),
            "Number of packets in the offer queue of the DHCP server (OfferQueueLength)",
            nil,
            nil,
        )
        c.deniedDueToMatch = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "denied_due_to_match_total"),
            "Total number of DHCP requests denied, based on matches from the Deny list (DeniedDueToMatch)",
            nil,
            nil,
        )
        c.deniedDueToNonMatch = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "denied_due_to_nonmatch_total"),
            "Total number of DHCP requests denied, based on non-matches from the Allow list (DeniedDueToNonMatch)",
            nil,
            nil,
        )
        c.failoverBndUpdSentTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_sent_total"),
            "Number of DHCP fail over Binding Update messages sent (FailoverBndupdSentTotal)",
            nil,
            nil,
        )
        c.failoverBndUpdReceivedTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_received_total"),
            "Number of DHCP fail over Binding Update messages received (FailoverBndupdReceivedTotal)",
            nil,
            nil,
        )
        c.failoverBndAckSentTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_sent_total"),
            "Number of DHCP fail over Binding Ack messages sent (FailoverBndackSentTotal)",
            nil,
            nil,
        )
        c.failoverBndAckReceivedTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_received_total"),
            "Number of DHCP fail over Binding Ack messages received (FailoverBndackReceivedTotal)",
            nil,
            nil,
        )
        c.failoverBndUpdPendingOutboundQueue = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_pending_in_outbound_queue"),
            "Number of pending outbound DHCP fail over Binding Update messages (FailoverBndupdPendingOutboundQueue)",
            nil,
            nil,
        )
        c.failoverTransitionsCommunicationInterruptedState = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_communicationinterrupted_state_total"),
            "Total number of transitions into COMMUNICATION INTERRUPTED state (FailoverTransitionsCommunicationinterruptedState)",
            nil,
            nil,
        )
        c.failoverTransitionsPartnerDownState = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_partnerdown_state_total"),
            "Total number of transitions into PARTNER DOWN state (FailoverTransitionsPartnerdownState)",
            nil,
            nil,
        )
        c.failoverTransitionsRecoverState = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_recover_total"),
            "Total number of transitions into RECOVER state (FailoverTransitionsRecoverState)",
            nil,
            nil,
        )
        c.failoverBndUpdDropped = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_dropped_total"),
            "Total number of DHCP fail over Binding Updates dropped (FailoverBndupdDropped)",
            nil,
            nil,
        )

        c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil)
        if err != nil {
            return fmt.Errorf("failed to create DHCP Server collector: %w", err)
        }
    }

    return nil
}

func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
    var errs []error

    if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) {
        if err := c.collectServerMetrics(ch); err != nil {
            errs = append(errs, err)
        }
    }

    if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) {
        if err := c.collectScopeMetrics(ch); err != nil {
            errs = append(errs, err)
        }
    }

    return errors.Join(errs...)
}

func (c *Collector) collectServerMetrics(ch chan<- prometheus.Metric) error {
    err := c.perfDataCollector.Collect(&c.perfDataObject)
    if err != nil {
        return fmt.Errorf("failed to collect DHCP Server metrics: %w", err)
    } else if len(c.perfDataObject) == 0 {
        return fmt.Errorf("failed to collect DHCP Server metrics: %w", types.ErrNoDataUnexpected)
    }

    ch <- prometheus.MustNewConstMetric(

@@ -413,3 +559,113 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {

    return nil
}

func (c *Collector) collectScopeMetrics(ch chan<- prometheus.Metric) error {
    dhcpScopes, err := dhcpsapi.GetDHCPV4ScopeStatistics()
    if err != nil {
        return fmt.Errorf("failed to get DHCP scopes: %w", err)
    }

    for _, scope := range dhcpScopes {
        scopeID := scope.ScopeIPAddress.String()

        ch <- prometheus.MustNewConstMetric(
            c.scopeInfo,
            prometheus.GaugeValue,
            1,
            scope.Name,
            scope.SuperScopeName,
            strconv.Itoa(int(scope.SuperScopeNumber)),
            scopeID,
        )

        for state, name := range dhcpsapi.DHCP_SUBNET_STATE_NAMES {
            metric := 0.0
            if state == scope.State {
                metric = 1.0
            }

            ch <- prometheus.MustNewConstMetric(
                c.scopeState,
                prometheus.GaugeValue,
                metric,
                scopeID,
                name,
            )
        }

        if scope.AddressesFree != -1 {
            ch <- prometheus.MustNewConstMetric(
                c.scopeAddressesFreeTotal,
                prometheus.GaugeValue,
                scope.AddressesFree,
                scopeID,
            )
        }

        if scope.AddressesFreeOnPartnerServer != -1 {
            ch <- prometheus.MustNewConstMetric(
                c.scopeAddressesFreeOnPartnerServerTotal,
                prometheus.GaugeValue,
                scope.AddressesFreeOnPartnerServer,
                scopeID,
            )
        }

        if scope.AddressesFreeOnThisServer != -1 {
            ch <- prometheus.MustNewConstMetric(
                c.scopeAddressesFreeOnThisServerTotal,
                prometheus.GaugeValue,
                scope.AddressesFreeOnThisServer,
                scopeID,
            )
        }

        if scope.AddressesInUse != -1 {
            ch <- prometheus.MustNewConstMetric(
                c.scopeAddressesInUseTotal,
                prometheus.GaugeValue,
                scope.AddressesInUse,
                scopeID,
            )
        }

        if scope.AddressesInUseOnPartnerServer != -1 {
            ch <- prometheus.MustNewConstMetric(
                c.scopeAddressesInUseOnPartnerServerTotal,
                prometheus.GaugeValue,
                scope.AddressesInUseOnPartnerServer,
                scopeID,
            )
        }

        if scope.AddressesInUseOnThisServer != -1 {
            ch <- prometheus.MustNewConstMetric(
                c.scopeAddressesInUseOnThisServerTotal,
                prometheus.GaugeValue,
                scope.AddressesInUseOnThisServer,
                scopeID,
            )
        }

        if scope.PendingOffers != -1 {
            ch <- prometheus.MustNewConstMetric(
                c.scopePendingOffersTotal,
                prometheus.GaugeValue,
                scope.PendingOffers,
                scopeID,
            )
        }

        if scope.ReservedAddress != -1 {
            ch <- prometheus.MustNewConstMetric(
                c.scopeReservedAddressTotal,
                prometheus.GaugeValue,
                scope.ReservedAddress,
                scopeID,
            )
        }
    }

    return nil
}
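Note: the new Collect above aggregates sub-collector failures with errors.Join, and collectScopeMetrics uses -1 as a "value unavailable" sentinel before emitting a gauge. This standalone sketch, not part of the diff, shows the errors.Join semantics the pattern relies on: nil entries are ignored and the result is nil when nothing failed.

package main

import (
    "errors"
    "fmt"
)

func collectA() error { return nil }                              // succeeds
func collectB() error { return errors.New("scope query failed") } // fails

func main() {
    var errs []error
    errs = append(errs, collectA()) // nil entries are dropped by Join
    errs = append(errs, collectB())

    // errors.Join (Go 1.20+) returns nil if every argument is nil,
    // so a fully successful Collect still reports success.
    if err := errors.Join(errs...); err != nil {
        fmt.Println(err) // prints only: scope query failed
    }
}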
@@ -36,7 +36,9 @@ var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for a few WMI metrics in Win32_DiskDrive.
type Collector struct {
    config Config
    config Config
    logger *slog.Logger

    miSession *mi.Session
    miQuery   mi.Query

@@ -71,18 +73,8 @@ func (c *Collector) Close() error {
    return nil
}

func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
    if miSession == nil {
        return errors.New("miSession is nil")
    }

    miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive")
    if err != nil {
        return fmt.Errorf("failed to create WMI query: %w", err)
    }

    c.miQuery = miQuery
    c.miSession = miSession
func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
    c.logger = logger.With(slog.String("collector", Name))

    c.diskInfo = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "info"),

@@ -120,6 +112,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
        nil,
    )

    if miSession == nil {
        return errors.New("miSession is nil")
    }

    miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive")
    if err != nil {
        return fmt.Errorf("failed to create WMI query: %w", err)
    }

    c.miQuery = miQuery
    c.miSession = miSession

    var dst []diskDrive
    if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
        return fmt.Errorf("WMI query failed: %w", err)

@@ -146,7 +150,7 @@ var (
    "Error",
    "Degraded",
    "Unknown",
    "Pred fail",
    "Pred Fail",
    "Starting",
    "Stopping",
    "Service",

@@ -16,8 +16,11 @@
package dns

import (
    "errors"
    "fmt"
    "log/slog"
    "slices"
    "strings"

    "github.com/alecthomas/kingpin/v2"
    "github.com/prometheus-community/windows_exporter/internal/mi"

@@ -26,12 +29,23 @@ import (
    "github.com/prometheus/client_golang/prometheus"
)

const Name = "dns"
const (
    Name = "dns"

    subCollectorMetrics  = "metrics"
    subCollectorWMIStats = "wmi_stats"
)

type Config struct{}
type Config struct {
    CollectorsEnabled []string `yaml:"collectors_enabled"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}
var ConfigDefaults = Config{
    CollectorsEnabled: []string{
        subCollectorMetrics,
        subCollectorWMIStats,
    },
}

// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.
type Collector struct {

@@ -40,6 +54,9 @@ type Collector struct {
    perfDataCollector *pdh.Collector
    perfDataObject    []perfDataCounterValues

    miSession *mi.Session
    miQuery   mi.Query

    dynamicUpdatesFailures *prometheus.Desc
    dynamicUpdatesQueued   *prometheus.Desc
    dynamicUpdatesReceived *prometheus.Desc

@@ -62,6 +79,7 @@ type Collector struct {
    zoneTransferResponsesReceived *prometheus.Desc
    zoneTransferSuccessReceived   *prometheus.Desc
    zoneTransferSuccessSent       *prometheus.Desc
    dnsWMIStats                   *prometheus.Desc
}

func New(config *Config) *Collector {

@@ -69,6 +87,10 @@ func New(config *Config) *Collector {
        config = &ConfigDefaults
    }

    if config.CollectorsEnabled == nil {
        config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
    }

    c := &Collector{
        config: *config,
    }

@@ -76,8 +98,26 @@ func New(config *Config) *Collector {
    return c
}

func NewWithFlags(_ *kingpin.Application) *Collector {
    return &Collector{}
func NewWithFlags(app *kingpin.Application) *Collector {
    c := &Collector{
        config: ConfigDefaults,
    }
    c.config.CollectorsEnabled = make([]string, 0)

    var collectorsEnabled string

    app.Flag(
        "collector.dns.enabled",
        "Comma-separated list of collectors to use. Defaults to all, if not specified.",
    ).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)

    app.Action(func(*kingpin.ParseContext) error {
        c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")

        return nil
    })

    return c
}

func (c *Collector) GetName() string {

@@ -90,14 +130,31 @@ func (c *Collector) Close() error {
    return nil
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create DNS collector: %w", err)
func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
    for _, collector := range c.config.CollectorsEnabled {
        if !slices.Contains([]string{subCollectorMetrics, subCollectorWMIStats}, collector) {
            return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
                strings.Join([]string{subCollectorMetrics, subCollectorWMIStats}, ", "),
            )
        }
    }

    if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
        if err := c.buildMetricsCollector(); err != nil {
            return err
        }
    }

    if slices.Contains(c.config.CollectorsEnabled, subCollectorWMIStats) {
        if err := c.buildErrorStatsCollector(miSession); err != nil {
            return err
        }
    }

    return nil
}

func (c *Collector) buildMetricsCollector() error {
    c.zoneTransferRequestsReceived = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
        "Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",

@@ -231,15 +288,65 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        nil,
    )

    c.dnsWMIStats = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "wmi_stats_total"),
        "DNS WMI statistics from MicrosoftDNS_Statistic",
        []string{"name", "collection_name", "dns_server"},
        nil,
    )

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create DNS collector: %w", err)
    }

    return nil
}

func (c *Collector) buildErrorStatsCollector(miSession *mi.Session) error {
    if miSession == nil {
        return errors.New("miSession is nil")
    }

    query, err := mi.NewQuery("SELECT Name, CollectionName, Value, DnsServerName FROM MicrosoftDNS_Statistic WHERE CollectionName = 'Error Stats'")
    if err != nil {
        return fmt.Errorf("failed to create query: %w", err)
    }

    c.miSession = miSession
    c.miQuery = query

    return nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
    errs := make([]error, 0)

    if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
        if err := c.collectMetrics(ch); err != nil {
            errs = append(errs, fmt.Errorf("failed collecting metrics: %w", err))
        }
    }

    if slices.Contains(c.config.CollectorsEnabled, subCollectorWMIStats) {
        if err := c.collectErrorStats(ch); err != nil {
            errs = append(errs, fmt.Errorf("failed collecting WMI statistics: %w", err))
        }
    }

    return errors.Join(errs...)
}

func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
    err := c.perfDataCollector.Collect(&c.perfDataObject)
    if err != nil {
        return fmt.Errorf("failed to collect DNS metrics: %w", err)
    } else if len(c.perfDataObject) == 0 {
        return fmt.Errorf("failed to collect DNS metrics: %w", types.ErrNoDataUnexpected)
    }

    ch <- prometheus.MustNewConstMetric(

@@ -493,3 +600,24 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {

    return nil
}

func (c *Collector) collectErrorStats(ch chan<- prometheus.Metric) error {
    var stats []Statistic
    if err := c.miSession.Query(&stats, mi.NamespaceRootMicrosoftDNS, c.miQuery); err != nil {
        return fmt.Errorf("failed to query DNS statistics: %w", err)
    }

    // Collect DNS error statistics
    for _, stat := range stats {
        ch <- prometheus.MustNewConstMetric(
            c.dnsWMIStats,
            prometheus.CounterValue,
            float64(stat.Value),
            stat.Name,
            stat.CollectionName,
            stat.DnsServerName,
        )
    }

    return nil
}

@@ -105,3 +105,11 @@ type perfDataCounterValues struct {
    _ float64 `perfdata:"Zone Transfer SOA Request Sent"`
    _ float64 `perfdata:"Zone Transfer Success"`
}

// Statistic represents the structure for DNS error statistics
type Statistic struct {
    Name           string `mi:"Name"`
    CollectionName string `mi:"CollectionName"`
    Value          uint64 `mi:"Value"`
    DnsServerName  string `mi:"DnsServerName"`
}
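Note: the new Statistic struct above relies on `mi:"..."` struct tags to map WMI property names onto Go fields; the exporter's internal/mi package does the actual decoding, which is not shown in this diff. The standalone sketch below, an illustration rather than the exporter's code, only demonstrates how such tags are read via reflection, which is the convention the decoder builds on.

package main

import (
    "fmt"
    "reflect"
)

// Same shape as the Statistic type added in the dns collector.
type Statistic struct {
    Name           string `mi:"Name"`
    CollectionName string `mi:"CollectionName"`
    Value          uint64 `mi:"Value"`
    DnsServerName  string `mi:"DnsServerName"`
}

func main() {
    t := reflect.TypeOf(Statistic{})
    for i := 0; i < t.NumField(); i++ {
        f := t.Field(i)
        // A tag-driven decoder matches each WMI property to the field
        // carrying the corresponding `mi` tag.
        fmt.Printf("%s <- WMI property %q\n", f.Name, f.Tag.Get("mi"))
    }
}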
@@ -37,7 +37,7 @@ type perfDataCounterValuesAutoDiscover struct {
func (c *Collector) buildAutoDiscover() error {
    var err error

    c.perfDataCollectorAutoDiscover, err = pdh.NewCollector[perfDataCounterValuesAutoDiscover](pdh.CounterTypeRaw, "MSExchange Autodiscover", pdh.InstancesAll)
    c.perfDataCollectorAutoDiscover, err = pdh.NewCollector[perfDataCounterValuesAutoDiscover](pdh.CounterTypeRaw, "MSExchangeAutodiscover", nil)
    if err != nil {
        return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
    }

@@ -31,7 +31,7 @@ type collectorAvailabilityService struct {
}

type perfDataCounterValuesAvailabilityService struct {
    RequestsPerSec float64 `perfdata:"Requests/sec"`
    AvailabilityRequestsPerSec float64 `perfdata:"Availability Requests (sec)"`
}

func (c *Collector) buildAvailabilityService() error {

@@ -43,7 +43,7 @@ func (c *Collector) buildAvailabilityService() error {
    }

    c.availabilityRequestsSec = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "avail_service_requests_per_sec"),
        prometheus.BuildFQName(types.Namespace, Name, "availability_service_requests_per_sec"),
        "Number of requests serviced per second",
        nil,
        nil,

@@ -62,7 +62,7 @@ func (c *Collector) collectAvailabilityService(ch chan<- prometheus.Metric) erro
    ch <- prometheus.MustNewConstMetric(
        c.availabilityRequestsSec,
        prometheus.CounterValue,
        data.RequestsPerSec,
        data.AvailabilityRequestsPerSec,
    )
}

@@ -39,7 +39,7 @@ type collectorTransportQueues struct {
    messagesSubmittedTotal         *prometheus.Desc
    messagesDelayedTotal           *prometheus.Desc
    messagesCompletedDeliveryTotal *prometheus.Desc
    shadowQueueLength              *prometheus.Desc
    aggregateShadowQueueLength     *prometheus.Desc
    submissionQueueLength          *prometheus.Desc
    delayQueueLength               *prometheus.Desc
    itemsCompletedDeliveryTotal    *prometheus.Desc

@@ -63,7 +63,7 @@ type perfDataCounterValuesTransportQueues struct {
    MessagesSubmittedTotal         float64 `perfdata:"Messages Submitted Total"`
    MessagesDelayedTotal           float64 `perfdata:"Messages Delayed Total"`
    MessagesCompletedDeliveryTotal float64 `perfdata:"Messages Completed Delivery Total"`
    ShadowQueueLength              float64 `perfdata:"Shadow Queue Length"`
    AggregateShadowQueueLength     float64 `perfdata:"Aggregate Shadow Queue Length"`
    SubmissionQueueLength          float64 `perfdata:"Submission Queue Length"`
    DelayQueueLength               float64 `perfdata:"Delay Queue Length"`
    ItemsCompletedDeliveryTotal    float64 `perfdata:"Items Completed Delivery Total"`

@@ -152,9 +152,9 @@ func (c *Collector) buildTransportQueues() error {
        []string{"name"},
        nil,
    )
    c.shadowQueueLength = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "transport_queues_shadow_queue_length"),
        "Shadow Queue Length",
    c.aggregateShadowQueueLength = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "transport_queues_aggregate_shadow_queue_length"),
        "The current number of messages in shadow queues.",
        []string{"name"},
        nil,
    )

@@ -280,9 +280,9 @@ func (c *Collector) collectTransportQueues(ch chan<- prometheus.Metric) error {
        labelName,
    )
    ch <- prometheus.MustNewConstMetric(
        c.shadowQueueLength,
        c.aggregateShadowQueueLength,
        prometheus.GaugeValue,
        data.ShadowQueueLength,
        data.AggregateShadowQueueLength,
        labelName,
    )
    ch <- prometheus.MustNewConstMetric(

@@ -149,7 +149,7 @@ func (c *Collector) collectGlobFilePath(ch chan<- prometheus.Metric, filePattern
    basePath, pattern := doublestar.SplitPattern(filePattern)
    basePathFS := os.DirFS(basePath)

    matches, err := doublestar.Glob(basePathFS, pattern, doublestar.WithFilesOnly())
    matches, err := doublestar.Glob(basePathFS, pattern, doublestar.WithFilesOnly(), doublestar.WithCaseInsensitive())
    if err != nil {
        return fmt.Errorf("failed to glob: %w", err)
    }
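Note: the filetime hunk above adds doublestar.WithCaseInsensitive() so glob patterns behave like Windows file names, which are case-insensitive. A minimal standalone sketch of the same call sequence follows; the path and pattern are illustrative only, and the option names are taken from the bmatcuk/doublestar/v4 API as used in the hunk.

package main

import (
    "fmt"
    "os"

    "github.com/bmatcuk/doublestar/v4"
)

func main() {
    pattern := "C:/logs/**/*.LOG" // hypothetical pattern

    // Split into a fixed base directory and the glob remainder,
    // exactly as collectGlobFilePath does.
    basePath, pat := doublestar.SplitPattern(pattern)
    fsys := os.DirFS(basePath)

    // Without WithCaseInsensitive, "*.LOG" could miss "app.log" even
    // though NTFS treats the two names as the same file.
    matches, err := doublestar.Glob(fsys, pat, doublestar.WithFilesOnly(), doublestar.WithCaseInsensitive())
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }

    for _, m := range matches {
        fmt.Println(m)
    }
}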
@@ -23,11 +23,10 @@ import (
    "strings"
    "sync"

    "github.com/Microsoft/hcsshim/osversion"
    "github.com/alecthomas/kingpin/v2"
    "github.com/prometheus-community/windows_exporter/internal/mi"
    "github.com/prometheus-community/windows_exporter/internal/types"
    "github.com/prometheus/client_golang/prometheus"
    "golang.org/x/sys/windows"
)

const (

@@ -149,7 +148,7 @@ func (c *Collector) Close() error {
    return nil
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
    c.collectorFns = make([]func(ch chan<- prometheus.Metric) error, 0, len(c.config.CollectorsEnabled))
    c.closeFns = make([]func(), 0, len(c.config.CollectorsEnabled))

@@ -157,19 +156,17 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        return nil
    }

    version := windows.RtlGetVersion()

    subCollectors := map[string]struct {
        build          func() error
        collect        func(ch chan<- prometheus.Metric) error
        close          func()
        minBuildNumber uint32
        minBuildNumber uint16
    }{
        subCollectorDataStore: {
            build:          c.buildDataStore,
            collect:        c.collectDataStore,
            close:          c.perfDataCollectorDataStore.Close,
            minBuildNumber: types.BuildNumberWindowsServer2022,
            minBuildNumber: osversion.LTSC2022,
        },
        subCollectorDynamicMemoryBalancer: {
            build: c.buildDynamicMemoryBalancer,

@@ -227,9 +224,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
            close:   c.perfDataCollectorVirtualNetworkAdapterDropReasons.Close,
        },
        subCollectorVirtualSMB: {
            build:   c.buildVirtualSMB,
            collect: c.collectVirtualSMB,
            close:   c.perfDataCollectorVirtualSMB.Close,
            build:          c.buildVirtualSMB,
            collect:        c.collectVirtualSMB,
            close:          c.perfDataCollectorVirtualSMB.Close,
            minBuildNumber: osversion.LTSC2022,
        },
        subCollectorVirtualStorageDevice: {
            build: c.buildVirtualStorageDevice,

@@ -243,6 +241,8 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        },
    }

    buildNumber := osversion.Build()

    // Result must order, to prevent test failures.
    sort.Strings(c.config.CollectorsEnabled)

@@ -253,8 +253,11 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
            return fmt.Errorf("unknown collector: %s", name)
        }

        if version.BuildNumber < subCollectors[name].minBuildNumber {
            errs = append(errs, fmt.Errorf("collector %s requires Windows Server 2022 or newer", name))
        if buildNumber < subCollectors[name].minBuildNumber {
            logger.Warn(fmt.Sprintf(
                "collector %s requires windows build version %d. Current build version: %d",
                name, subCollectors[name].minBuildNumber, buildNumber,
            ), slog.String("collector", name))

            continue
        }
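Note: the hunks above replace a hard error on old Windows builds with a warn-and-skip, keyed off hcsshim's osversion package (osversion.Build() returns the build number as a uint16; LTSC2019 is build 17763 and LTSC2022 is 20348). The following sketch, assuming those osversion symbols, extracts that gating decision into a small standalone helper; it is an illustration of the pattern, not code from the diff.

package gatesketch

import (
    "fmt"
    "log/slog"

    "github.com/Microsoft/hcsshim/osversion"
)

// skipIfTooOld mirrors the Build() logic above: rather than failing the
// whole collector, log a warning and skip the sub-collector when the
// host build is older than the sub-collector's minimum.
func skipIfTooOld(logger *slog.Logger, name string, minBuild uint16) bool {
    build := osversion.Build() // Windows-only; reads the OS build number
    if build >= minBuild {
        return false
    }

    logger.Warn(fmt.Sprintf(
        "collector %s requires windows build version %d. Current build version: %d",
        name, minBuild, build,
    ), slog.String("collector", name))

    return true
}

A caller would invoke skipIfTooOld(logger, "datastore", osversion.LTSC2022) inside its build loop and continue on true, which is what the hyperv Build loop now does.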
@@ -18,6 +18,7 @@ package hyperv
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Microsoft/hcsshim/osversion"
|
||||
"github.com/prometheus-community/windows_exporter/internal/pdh"
|
||||
"github.com/prometheus-community/windows_exporter/internal/types"
|
||||
"github.com/prometheus-community/windows_exporter/internal/utils"
|
||||
@@ -40,7 +41,7 @@ type perfDataCounterValuesDynamicMemoryBalancer struct {
|
||||
|
||||
// Hyper-V Dynamic Memory Balancer metrics
|
||||
VmDynamicMemoryBalancerAvailableMemory float64 `perfdata:"Available Memory"`
|
||||
VmDynamicMemoryBalancerAvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing"`
|
||||
VmDynamicMemoryBalancerAvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing" perfdata_min_build:"17763"`
|
||||
VmDynamicMemoryBalancerAveragePressure float64 `perfdata:"Average Pressure"`
|
||||
VmDynamicMemoryBalancerSystemCurrentPressure float64 `perfdata:"System Current Pressure"`
|
||||
}
|
||||
@@ -96,12 +97,14 @@ func (c *Collector) collectDynamicMemoryBalancer(ch chan<- prometheus.Metric) er
|
||||
data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.vmDynamicMemoryBalancerAvailableMemoryForBalancing,
|
||||
prometheus.GaugeValue,
|
||||
utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemoryForBalancing),
|
||||
data.Name,
|
||||
)
|
||||
if osversion.Build() >= osversion.LTSC2019 {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.vmDynamicMemoryBalancerAvailableMemoryForBalancing,
|
||||
prometheus.GaugeValue,
|
||||
utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemoryForBalancing),
|
||||
data.Name,
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.vmDynamicMemoryBalancerAveragePressure,
|
||||
|
||||
@@ -18,6 +18,7 @@ package hyperv
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Microsoft/hcsshim/osversion"
|
||||
"github.com/prometheus-community/windows_exporter/internal/pdh"
|
||||
"github.com/prometheus-community/windows_exporter/internal/types"
|
||||
"github.com/prometheus-community/windows_exporter/internal/utils"
|
||||
@@ -47,7 +48,7 @@ type perfDataCounterValuesDynamicMemoryVM struct {
|
||||
// Hyper-V Dynamic Memory VM metrics
|
||||
VmMemoryAddedMemory float64 `perfdata:"Added Memory"`
|
||||
VmMemoryCurrentPressure float64 `perfdata:"Current Pressure"`
|
||||
VmMemoryGuestAvailableMemory float64 `perfdata:"Guest Available Memory"`
|
||||
VmMemoryGuestAvailableMemory float64 `perfdata:"Guest Available Memory" perfdata_min_build:"17763"`
|
||||
VmMemoryGuestVisiblePhysicalMemory float64 `perfdata:"Guest Visible Physical Memory"`
|
||||
VmMemoryMaximumPressure float64 `perfdata:"Maximum Pressure"`
|
||||
VmMemoryMemoryAddOperations float64 `perfdata:"Memory Add Operations"`
|
||||
@@ -150,12 +151,14 @@ func (c *Collector) collectDynamicMemoryVM(ch chan<- prometheus.Metric) error {
|
||||
data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.vmMemoryGuestAvailableMemory,
|
||||
prometheus.GaugeValue,
|
||||
utils.MBToBytes(data.VmMemoryGuestAvailableMemory),
|
||||
data.Name,
|
||||
)
|
||||
if osversion.Build() >= osversion.LTSC2019 {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.vmMemoryGuestAvailableMemory,
|
||||
prometheus.GaugeValue,
|
||||
utils.MBToBytes(data.VmMemoryGuestAvailableMemory),
|
||||
data.Name,
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.vmMemoryGuestVisiblePhysicalMemory,
|
||||
|
||||
@@ -40,7 +40,7 @@ type collectorHypervisorVirtualProcessor struct {
|
||||
type perfDataCounterValuesHypervisorVirtualProcessor struct {
|
||||
Name string
|
||||
|
||||
HypervisorVirtualProcessorGuestIdleTimePercent float64 `perfdata:"% Guest Idle Time"`
|
||||
HypervisorVirtualProcessorGuestRunTimePercent float64 `perfdata:"% Guest Run Time"`
|
||||
HypervisorVirtualProcessorHypervisorRunTimePercent float64 `perfdata:"% Hypervisor Run Time"`
|
||||
HypervisorVirtualProcessorTotalRunTimePercent float64 `perfdata:"% Total Run Time"`
|
||||
HypervisorVirtualProcessorRemoteRunTimePercent float64 `perfdata:"% Remote Run Time"`
|
||||
@@ -108,15 +108,15 @@ func (c *Collector) collectHypervisorVirtualProcessor(ch chan<- prometheus.Metri
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.hypervisorVirtualProcessorTimeTotal,
|
||||
prometheus.CounterValue,
|
||||
data.HypervisorVirtualProcessorGuestIdleTimePercent,
|
||||
vmName, coreID, "guest_idle",
|
||||
data.HypervisorVirtualProcessorGuestRunTimePercent,
|
||||
vmName, coreID, "guest",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.hypervisorVirtualProcessorTimeTotal,
|
||||
prometheus.CounterValue,
|
||||
data.HypervisorVirtualProcessorGuestIdleTimePercent,
|
||||
vmName, coreID, "guest_idle",
|
||||
data.HypervisorVirtualProcessorRemoteRunTimePercent,
|
||||
vmName, coreID, "remote",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
|
||||
@@ -167,7 +167,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
|
||||
prometheus.Labels{"version": fmt.Sprintf("%d.%d", c.iisVersion.major, c.iisVersion.minor)},
|
||||
)
|
||||
|
||||
errs := make([]error, 0, 4)
|
||||
errs := make([]error, 0)
|
||||
|
||||
if err := c.buildWebService(); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to build Web Service collector: %w", err))
|
||||
@@ -247,7 +247,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
|
||||
1,
|
||||
)
|
||||
|
||||
errs := make([]error, 0, 4)
|
||||
errs := make([]error, 0)
|
||||
|
||||
if err := c.collectWebService(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect Web Service metrics: %w", err))
|
||||
|
||||
@@ -505,10 +505,6 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
|
||||
deduplicateIISNames(c.perfDataObjectW3SVCW3WP)
|
||||
|
||||
for _, data := range c.perfDataObjectW3SVCW3WP {
|
||||
if c.config.AppExclude.MatchString(data.Name) || !c.config.AppInclude.MatchString(data.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract the apppool name from the format <PID>_<NAME>
|
||||
pid := workerProcessNameExtractor.ReplaceAllString(data.Name, "$1")
|
||||
|
||||
|
||||
@@ -62,8 +62,6 @@ type collectorWebServiceCache struct {
|
||||
}
|
||||
|
||||
type perfDataCounterServiceCache struct {
|
||||
Name string
|
||||
|
||||
ServiceCacheActiveFlushedEntries float64 `perfdata:"Active Flushed Entries"`
|
||||
ServiceCacheCurrentFileCacheMemoryUsage float64 `perfdata:"Current File Cache Memory Usage"`
|
||||
ServiceCacheMaximumFileCacheMemoryUsage float64 `perfdata:"Maximum File Cache Memory Usage"`
|
||||
@@ -100,10 +98,6 @@ type perfDataCounterServiceCache struct {
|
||||
ServiceCacheOutputCacheFlushesTotal float64 `perfdata:"Output Cache Total Flushes"`
|
||||
}
|
||||
|
||||
func (p perfDataCounterServiceCache) GetName() string {
|
||||
return p.Name
|
||||
}
|
||||
|
||||
func (c *Collector) buildWebServiceCache() error {
|
||||
var err error
|
||||
|
||||
@@ -291,13 +285,7 @@ func (c *Collector) collectWebServiceCache(ch chan<- prometheus.Metric) error {
|
||||
return fmt.Errorf("failed to collect Web Service Cache metrics: %w", err)
|
||||
}
|
||||
|
||||
deduplicateIISNames(c.perfDataObjectServiceCache)
|
||||
|
||||
for _, data := range c.perfDataObjectServiceCache {
|
||||
if c.config.SiteExclude.MatchString(data.Name) || !c.config.SiteInclude.MatchString(data.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.serviceCacheActiveFlushedEntries,
|
||||
prometheus.GaugeValue,
|
||||
|
||||
@@ -150,13 +150,6 @@ func (c *Collector) Close() error {
|
||||
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
|
||||
c.logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
|
||||
}
|
||||
|
||||
c.information = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "info"),
|
||||
"A metric with a constant '1' value labeled with logical disk information",
|
||||
@@ -281,6 +274,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
|
||||
nil,
|
||||
)
|
||||
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -588,6 +588,11 @@ func getAllMountedVolumes() (map[string]string, error) {
|
||||
break
|
||||
}
|
||||
|
||||
if errors.Is(err, windows.ERROR_FILE_NOT_FOUND) {
|
||||
// the volume is not mounted
|
||||
break
|
||||
}
|
||||
|
||||
if errors.Is(err, windows.ERROR_NO_MORE_FILES) {
|
||||
rootPathBuf = make([]uint16, (rootPathLen+1)/2)
|
||||
|
||||
|
||||
@@ -34,6 +34,7 @@ type Config struct{}
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for WMI metrics.
|
||||
// Deprecated: Use windows_terminal_services_session_info instead.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
@@ -64,10 +65,16 @@ func (c *Collector) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
|
||||
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
|
||||
logger.Warn("The logon collector will be removed mid 2025. "+
|
||||
"See https://github.com/prometheus-community/windows_exporter/pull/1957 for more information. If you see values in this collector"+
|
||||
" that you need, please open an issue to discuss how to get them into the new collector.",
|
||||
slog.String("collector", Name),
|
||||
)
|
||||
|
||||
c.sessionInfo = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "session_logon_timestamp_seconds"),
|
||||
"timestamp of the logon session in seconds.",
|
||||
"Deprecated. Use windows_terminal_services_session_info instead.",
|
||||
[]string{"id", "username", "domain", "type"},
|
||||
nil,
|
||||
)
|
||||
|
||||
@@ -110,13 +110,6 @@ func (c *Collector) Close() error {
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Memory collector: %w", err)
|
||||
}
|
||||
|
||||
c.availableBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "available_bytes"),
|
||||
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
|
||||
@@ -340,13 +333,20 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
|
||||
nil,
|
||||
)
|
||||
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Memory collector: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
|
||||
errs := make([]error, 0, 2)
|
||||
errs := make([]error, 0)
|
||||
|
||||
if err := c.collectPDH(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed collecting memory metrics: %w", err))
|
||||
@@ -390,6 +390,8 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
|
||||
err := c.perfDataCollector.Collect(&c.perfDataObject)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect Memory metrics: %w", err)
|
||||
} else if len(c.perfDataObject) == 0 {
|
||||
return fmt.Errorf("failed to collect Memory metrics: %w", types.ErrNoDataUnexpected)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
|
||||
@@ -122,7 +122,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {

c.miSession = miSession

errs := make([]error, 0, 5)
errs := make([]error, 0)

if slices.Contains(c.config.CollectorsEnabled, subCollectorCluster) {
if err := c.buildCluster(); err != nil {

@@ -227,7 +227,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
wg.Wait()
close(errCh)

errs := make([]error, 0, 5)
errs := make([]error, 0)

for err := range errCh {
errs = append(errs, err)

@@ -18,6 +18,7 @@ package mscluster
import (
"fmt"

"github.com/Microsoft/hcsshim/osversion"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"

@@ -192,7 +193,14 @@ type msClusterCluster struct {
}

func (c *Collector) buildCluster() error {
clusterMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_Cluster")
buildNumber := osversion.Build()

wmiSelect := "AddEvictDelay,AdminAccessPoint,AutoAssignNodeSite,AutoBalancerLevel,AutoBalancerMode,BackupInProgress,BlockCacheSize,ClusSvcHangTimeout,ClusSvcRegroupOpeningTimeout,ClusSvcRegroupPruningTimeout,ClusSvcRegroupStageTimeout,ClusSvcRegroupTickInMilliseconds,ClusterEnforcedAntiAffinity,ClusterFunctionalLevel,ClusterGroupWaitDelay,ClusterLogLevel,ClusterLogSize,ClusterUpgradeVersion,CrossSiteDelay,CrossSiteThreshold,CrossSubnetDelay,CrossSubnetThreshold,CsvBalancer,DatabaseReadWriteMode,DefaultNetworkRole,DisableGroupPreferredOwnerRandomization,DrainOnShutdown,DynamicQuorumEnabled,EnableSharedVolumes,FixQuorum,GracePeriodEnabled,GracePeriodTimeout,GroupDependencyTimeout,HangRecoveryAction,IgnorePersistentStateOnStartup,LogResourceControls,LowerQuorumPriorityNodeId,MessageBufferLength,MinimumNeverPreemptPriority,MinimumPreemptorPriority,NetftIPSecEnabled,PlacementOptions,PlumbAllCrossSubnetRoutes,PreventQuorum,QuarantineDuration,QuarantineThreshold,QuorumArbitrationTimeMax,QuorumArbitrationTimeMin,QuorumLogFileSize,QuorumTypeValue,RequestReplyTimeout,ResiliencyDefaultPeriod,ResiliencyLevel,ResourceDllDeadlockPeriod,RootMemoryReserved,RouteHistoryLength,S2DBusTypes,S2DCacheDesiredState,S2DCacheFlashReservePercent,S2DCachePageSizeKBytes,S2DEnabled,S2DIOLatencyThreshold,S2DOptimizations,SameSubnetDelay,SameSubnetThreshold,SecurityLevel,SharedVolumeVssWriterOperationTimeout,ShutdownTimeoutInMinutes,UseClientAccessNetworksForSharedVolumes,WitnessDatabaseWriteTimeout,WitnessDynamicWeight,WitnessRestartInterval"
if buildNumber >= osversion.LTSC2022 {
wmiSelect += ",DetectManagedEvents,SecurityLevelForStorage,MaxNumberOfNodes,DetectManagedEventsThreshold,DetectedCloudPlatform"
}

clusterMIQuery, err := mi.NewQuery(fmt.Sprintf("SELECT %s FROM MSCluster_Cluster", wmiSelect))
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
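The same build-number gate appears again in buildNode below: properties that only exist on newer cluster schemas are appended to the SELECT list only when the local build is at least LTSC2022, since including an unknown property would fail the whole WMI query. A hedged sketch of the same construction with the columns kept in a slice (an illustrative alternative, not the repository's code, and abbreviated to a handful of columns):

package mscluster

import (
	"fmt"
	"strings"

	"github.com/Microsoft/hcsshim/osversion"
)

// clusterQuery assembles the MSCluster_Cluster SELECT, gating the
// LTSC2022-only properties on the local OS build number.
func clusterQuery() string {
	cols := []string{"AddEvictDelay", "AdminAccessPoint"} // abbreviated; full list in the diff above
	if osversion.Build() >= osversion.LTSC2022 {
		cols = append(cols,
			"DetectManagedEvents", "SecurityLevelForStorage", "MaxNumberOfNodes",
			"DetectManagedEventsThreshold", "DetectedCloudPlatform")
	}

	return fmt.Sprintf("SELECT %s FROM MSCluster_Cluster", strings.Join(cols, ","))
}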
@@ -852,27 +860,6 @@ func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error {
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterDetectedCloudPlatform,
prometheus.GaugeValue,
float64(v.DetectedCloudPlatform),
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterDetectManagedEvents,
prometheus.GaugeValue,
float64(v.DetectManagedEvents),
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterDetectManagedEventsThreshold,
prometheus.GaugeValue,
float64(v.DetectManagedEventsThreshold),
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterDisableGroupPreferredOwnerRandomization,
prometheus.GaugeValue,

@@ -957,13 +944,6 @@ func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error {
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterMaxNumberOfNodes,
prometheus.GaugeValue,
float64(v.MaxNumberOfNodes),
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterMessageBufferLength,
prometheus.GaugeValue,

@@ -1167,13 +1147,6 @@ func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error {
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterSecurityLevelForStorage,
prometheus.GaugeValue,
float64(v.SecurityLevelForStorage),
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterSharedVolumeVssWriterOperationTimeout,
prometheus.GaugeValue,

@@ -1215,6 +1188,43 @@ func (c *Collector) collectCluster(ch chan<- prometheus.Metric) error {
float64(v.WitnessRestartInterval),
v.Name,
)

if osversion.Build() >= osversion.LTSC2022 {
ch <- prometheus.MustNewConstMetric(
c.clusterDetectManagedEvents,
prometheus.GaugeValue,
float64(v.DetectManagedEvents),
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterDetectManagedEventsThreshold,
prometheus.GaugeValue,
float64(v.DetectManagedEventsThreshold),
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterSecurityLevelForStorage,
prometheus.GaugeValue,
float64(v.SecurityLevelForStorage),
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterMaxNumberOfNodes,
prometheus.GaugeValue,
float64(v.MaxNumberOfNodes),
v.Name,
)

ch <- prometheus.MustNewConstMetric(
c.clusterDetectedCloudPlatform,
prometheus.GaugeValue,
float64(v.DetectedCloudPlatform),
v.Name,
)
}
}

return nil

@@ -48,7 +48,7 @@ type msClusterNetwork struct {
}

func (c *Collector) buildNetwork() error {
networkMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_Network")
networkMIQuery, err := mi.NewQuery("SELECT Characteristics,Flags,Metric,Role,State FROM MSCluster_Network")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}

@@ -18,6 +18,7 @@ package mscluster
import (
"fmt"

"github.com/Microsoft/hcsshim/osversion"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"

@@ -66,7 +67,14 @@ type msClusterNode struct {
}

func (c *Collector) buildNode() error {
nodeMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_Node")
buildNumber := osversion.Build()

wmiSelect := "BuildNumber,Characteristics,DynamicWeight,Flags,MajorVersion,MinorVersion,NeedsPreventQuorum,NodeDrainStatus,NodeHighestVersion,NodeLowestVersion,NodeWeight,State,StatusInformation"
if buildNumber >= osversion.LTSC2022 {
wmiSelect += ",DetectedCloudPlatform"
}

nodeMIQuery, err := mi.NewQuery(fmt.Sprintf("SELECT %s FROM MSCluster_Node", wmiSelect))
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}

@@ -74,7 +74,7 @@ type msClusterResource struct {
}

func (c *Collector) buildResource() error {
resourceMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_Resource")
resourceMIQuery, err := mi.NewQuery("SELECT Name,Type,OwnerGroup,OwnerNode,Characteristics,DeadlockTimeout,EmbeddedFailureAction,Flags,IsAlivePollInterval,LooksAlivePollInterval,MonitorProcessId,PendingTimeout,ResourceClass,RestartAction,RestartDelay,RestartPeriod,RestartThreshold,RetryPeriodOnFailure,State,Subclass FROM MSCluster_Resource")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}

@@ -66,7 +66,7 @@ type msClusterResourceGroup struct {
}

func (c *Collector) buildResourceGroup() error {
resourceGroupMIQuery, err := mi.NewQuery("SELECT * FROM MSCluster_ResourceGroup")
resourceGroupMIQuery, err := mi.NewQuery("SELECT AutoFailbackType,Characteristics,ColdStartSetting,DefaultOwner,FailbackWindowEnd,FailbackWindowStart,FailoverPeriod,FailoverThreshold,Flags,GroupType,OwnerNode,Priority,ResiliencyPeriod,State FROM MSCluster_ResourceGroup")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}

@@ -74,13 +74,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error

c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
}

c.bytesInJournalQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_in_journal_queue"),
"Size of queue journal in bytes",

@@ -106,6 +99,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)

var err error

c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
}

return nil
}

@@ -333,7 +333,7 @@ func (c *Collector) getMSSQLInstances() ([]mssqlInstance, error) {
return nil, fmt.Errorf("couldn't get instance info: %w", err)
}

instance, err := newMssqlInstance(instanceVersion)
instance, err := newMssqlInstance(instanceName, instanceVersion)
if err != nil {
return nil, err
}

@@ -348,14 +348,14 @@ func (c *Collector) getMSSQLInstances() ([]mssqlInstance, error) {

// mssqlGetPerfObjectName returns the name of the Windows Performance
// Counter object for the given SQL instance and Collector.
func (c *Collector) mssqlGetPerfObjectName(sqlInstance string, collector string) string {
func (c *Collector) mssqlGetPerfObjectName(sqlInstance mssqlInstance, collector string) string {
sb := strings.Builder{}

if sqlInstance == "MSSQLSERVER" {
if sqlInstance.isFirstInstance {
sb.WriteString("SQLServer:")
} else {
sb.WriteString("MSSQL$")
sb.WriteString(sqlInstance)
sb.WriteString(sqlInstance.name)
sb.WriteString(":")
}
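The isFirstInstance switch above determines which Performance Counter object prefix is used: the default instance exposes objects like "SQLServer:Databases", while a named instance uses "MSSQL$<name>:Databases". A self-contained sketch of that naming (the trailing write of the collector name is assumed from the doc comment, since the hunk ends before it):

package main

import (
	"fmt"
	"strings"
)

type mssqlInstance struct {
	name            string
	isFirstInstance bool
}

// perfObjectName mirrors the naming logic from the diff above.
func perfObjectName(inst mssqlInstance, collector string) string {
	sb := strings.Builder{}

	if inst.isFirstInstance {
		sb.WriteString("SQLServer:")
	} else {
		sb.WriteString("MSSQL$")
		sb.WriteString(inst.name)
		sb.WriteString(":")
	}

	sb.WriteString(collector) // assumed final step, per the doc comment
	return sb.String()
}

func main() {
	fmt.Println(perfObjectName(mssqlInstance{name: "MSSQLSERVER", isFirstInstance: true}, "Buffer Manager"))
	// SQLServer:Buffer Manager
	fmt.Println(perfObjectName(mssqlInstance{name: "SQL2022", isFirstInstance: false}, "Buffer Manager"))
	// MSSQL$SQL2022:Buffer Manager
}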
@@ -369,8 +369,8 @@ func (c *Collector) mssqlGetPerfObjectName(sqlInstance string, collector string)
func (c *Collector) collect(
ch chan<- prometheus.Metric,
collector string,
perfDataCollectors map[string]*pdh.Collector,
collectFn func(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error,
perfDataCollectors map[mssqlInstance]*pdh.Collector,
collectFn func(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error,
) error {
errs := make([]error, 0, len(perfDataCollectors))

@@ -386,11 +386,11 @@ func (c *Collector) collect(
errs = append(errs, err)
success = 0.0

c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance, duration),
c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance.name, duration),
slog.Any("err", err),
)
} else {
c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance, duration))
c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance.name, duration))
}

if collector == "" {

@@ -401,13 +401,13 @@ func (c *Collector) collect(
c.mssqlScrapeDurationDesc,
prometheus.GaugeValue,
duration.Seconds(),
collector, sqlInstance,
collector, sqlInstance.name,
)
ch <- prometheus.MustNewConstMetric(
c.mssqlScrapeSuccessDesc,
prometheus.GaugeValue,
success,
collector, sqlInstance,
collector, sqlInstance.name,
)
}

@@ -25,7 +25,7 @@ import (
)

type collectorAccessMethods struct {
accessMethodsPerfDataCollectors map[string]*pdh.Collector
accessMethodsPerfDataCollectors map[mssqlInstance]*pdh.Collector
accessMethodsPerfDataObject []perfDataCounterValuesAccessMethods

accessMethodsAUcleanupbatches *prometheus.Desc

@@ -118,17 +118,17 @@ type perfDataCounterValuesAccessMethods struct {
AccessMethodsWorkfilesCreatedPerSec float64 `perfdata:"Workfiles Created/sec"`
AccessMethodsWorktablesCreatedPerSec float64 `perfdata:"Worktables Created/sec"`
AccessMethodsWorktablesFromCacheRatio float64 `perfdata:"Worktables From Cache Ratio"`
AccessMethodsWorktablesFromCacheRatioBase float64 `perfdata:"Worktables From Cache Base,secondvalue"`
AccessMethodsWorktablesFromCacheRatioBase float64 `perfdata:"Worktables From Cache Ratio,secondvalue"`
}

func (c *Collector) buildAccessMethods() error {
var err error

c.accessMethodsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.accessMethodsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
c.accessMethodsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Access Methods"), nil)
c.accessMethodsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Access Methods"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create AccessMethods collector for instance %s: %w", sqlInstance.name, err))
}
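The struct-tag fixes above all follow the same rule: a PDH ratio counter and its base sample share the counter name, and the `secondvalue` flag selects the base; there is no separate "... Base" counter to query. The exporter then exposes the numerator and the base as separate series (hits and lookups) rather than a precomputed ratio. A small sketch of the pattern, assuming the perfdata tag syntax used above:

type cacheRatio struct {
	Hits    float64 `perfdata:"Worktables From Cache Ratio"`             // first (numerator) sample
	Lookups float64 `perfdata:"Worktables From Cache Ratio,secondvalue"` // base sample of the same counter
}

// ratio derives hits per lookup at read time; guard the zero base.
func (r cacheRatio) ratio() float64 {
	if r.Lookups == 0 {
		return 0
	}

	return r.Hits / r.Lookups
}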
@@ -407,7 +407,7 @@ func (c *Collector) collectAccessMethods(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorAccessMethods, c.accessMethodsPerfDataCollectors, c.collectAccessMethodsInstance)
}

func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.accessMethodsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"), err)

@@ -417,308 +417,308 @@ func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sq
c.accessMethodsAUcleanupbatches,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupbatchesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsAUcleanups,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsByReferenceLobCreateCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobCreateCount,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsByReferenceLobUseCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobUseCount,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountLobReadahead,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsCountLobReadahead,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountPullInRow,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsCountPullInRow,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountPushOffRow,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsCountPushOffRow,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsDeferreddroppedAUs,
prometheus.GaugeValue,
c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedAUs,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsDeferredDroppedrowsets,
prometheus.GaugeValue,
c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedRowsets,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsDroppedrowsetcleanups,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetCleanupsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsDroppedrowsetsskipped,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetsSkippedPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsExtentDeallocations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsExtentDeallocationsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsExtentsAllocated,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsExtentsAllocatedPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedAUcleanupbatches,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFailedAUCleanupBatchesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedleafpagecookie,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFailedLeafPageCookie,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedtreepagecookie,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFailedTreePageCookie,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsForwardedRecords,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsForwardedRecordsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsFreeSpacePageFetches,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpacePageFetchesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsFreeSpaceScans,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpaceScansPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsFullScans,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsFullScansPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsIndexSearches,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsIndexSearchesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsInSysXactwaits,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsInSysXactWaitsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobHandleCreateCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleCreateCount,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobHandleDestroyCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleDestroyCount,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderCreateCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderCreateCount,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderDestroyCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderDestroyCount,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderTruncationCount,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderTruncationCount,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsMixedPageAllocations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsMixedPageAllocationsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageCompressionAttempts,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPageCompressionAttemptsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageDeallocations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPageDeallocationsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsPagesAllocated,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPagesAllocatedPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsPagesCompressed,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPagesCompressedPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageSplits,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsPageSplitsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsProbeScans,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsProbeScansPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsRangeScans,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsRangeScansPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsScanPointRevalidations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsScanPointRevalidationsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsSkippedGhostedRecords,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsSkippedGhostedRecordsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsTableLockEscalations,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsTableLockEscalationsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsUsedleafpagecookie,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsUsedLeafPageCookie,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsUsedtreepagecookie,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsUsedTreePageCookie,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorkfilesCreated,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsWorkfilesCreatedPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesCreated,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesCreatedPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesFromCacheHits,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatio,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesFromCacheLookups,
prometheus.CounterValue,
c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatioBase,
sqlInstance,
sqlInstance.name,
)

return nil
@@ -26,7 +26,7 @@ import (
)

type collectorAvailabilityReplica struct {
availabilityReplicaPerfDataCollectors map[string]*pdh.Collector
availabilityReplicaPerfDataCollectors map[mssqlInstance]*pdh.Collector
availabilityReplicaPerfDataObject []perfDataCounterValuesAvailabilityReplica

availReplicaBytesReceivedFromReplica *prometheus.Desc

@@ -57,11 +57,11 @@ type perfDataCounterValuesAvailabilityReplica struct {
func (c *Collector) buildAvailabilityReplica() error {
var err error

c.availabilityReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.availabilityReplicaPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
c.availabilityReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Availability Replica"), pdh.InstancesAll)
c.availabilityReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance.name, err))
}

@@ -130,7 +130,7 @@ func (c *Collector) collectAvailabilityReplica(ch chan<- prometheus.Metric) erro
return c.collect(ch, subCollectorAvailabilityReplica, c.availabilityReplicaPerfDataCollectors, c.collectAvailabilityReplicaInstance)
}

func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.availabilityReplicaPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), err)

@@ -141,63 +141,63 @@ func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metr
c.availReplicaBytesReceivedFromReplica,
prometheus.CounterValue,
data.AvailReplicaBytesReceivedFromReplicaPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesSentToReplica,
prometheus.CounterValue,
data.AvailReplicaBytesSentToReplicaPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesSentToTransport,
prometheus.CounterValue,
data.AvailReplicaBytesSentToTransportPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.availReplicaFlowControl,
prometheus.CounterValue,
data.AvailReplicaFlowControlPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.availReplicaFlowControlTimeMS,
prometheus.CounterValue,
utils.MilliSecToSec(data.AvailReplicaFlowControlTimeMSPerSec),
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.availReplicaReceivesFromReplica,
prometheus.CounterValue,
data.AvailReplicaReceivesFromReplicaPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.availReplicaResentMessages,
prometheus.CounterValue,
data.AvailReplicaResentMessagesPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.availReplicaSendsToReplica,
prometheus.CounterValue,
data.AvailReplicaSendsToReplicaPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.availReplicaSendsToTransport,
prometheus.CounterValue,
data.AvailReplicaSendsToTransportPerSec,
sqlInstance, data.Name,
sqlInstance.name, data.Name,
)
}

@@ -25,7 +25,7 @@ import (
)

type collectorBufferManager struct {
bufManPerfDataCollectors map[string]*pdh.Collector
bufManPerfDataCollectors map[mssqlInstance]*pdh.Collector
bufManPerfDataObject []perfDataCounterValuesBufMan

bufManBackgroundwriterpages *prometheus.Desc

@@ -56,7 +56,7 @@ type collectorBufferManager struct {
type perfDataCounterValuesBufMan struct {
BufManBackgroundWriterPagesPerSec float64 `perfdata:"Background writer pages/sec"`
BufManBufferCacheHitRatio float64 `perfdata:"Buffer cache hit ratio"`
BufManBufferCacheHitRatioBase float64 `perfdata:"Buffer cache hit ratio base,secondvalue"`
BufManBufferCacheHitRatioBase float64 `perfdata:"Buffer cache hit ratio,secondvalue"`
BufManCheckpointPagesPerSec float64 `perfdata:"Checkpoint pages/sec"`
BufManDatabasePages float64 `perfdata:"Database pages"`
BufManExtensionAllocatedPages float64 `perfdata:"Extension allocated pages"`

@@ -82,11 +82,11 @@ type perfDataCounterValuesBufMan struct {
func (c *Collector) buildBufferManager() error {
var err error

c.bufManPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
c.bufManPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
c.bufManPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesBufMan](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Buffer Manager"), nil)
c.bufManPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesBufMan](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Buffer Manager collector for instance %s: %w", sqlInstance.name, err))
}

@@ -238,7 +238,7 @@ func (c *Collector) collectBufferManager(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorBufferManager, c.bufManPerfDataCollectors, c.collectBufferManagerInstance)
}

func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.bufManPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), err)

@@ -249,161 +249,161 @@ func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sq
c.bufManBackgroundwriterpages,
prometheus.CounterValue,
data.BufManBackgroundWriterPagesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManBuffercachehits,
prometheus.GaugeValue,
data.BufManBufferCacheHitRatio,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManBuffercachelookups,
prometheus.GaugeValue,
data.BufManBufferCacheHitRatioBase,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManCheckpointpages,
prometheus.CounterValue,
data.BufManCheckpointPagesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManDatabasepages,
prometheus.GaugeValue,
data.BufManDatabasePages,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManExtensionallocatedpages,
prometheus.GaugeValue,
data.BufManExtensionAllocatedPages,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManExtensionfreepages,
prometheus.GaugeValue,
data.BufManExtensionFreePages,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManExtensioninuseaspercentage,
prometheus.GaugeValue,
data.BufManExtensionInUseAsPercentage,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManExtensionoutstandingIOcounter,
prometheus.GaugeValue,
data.BufManExtensionOutstandingIOCounter,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpageevictions,
prometheus.CounterValue,
data.BufManExtensionPageEvictionsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpagereads,
prometheus.CounterValue,
data.BufManExtensionPageReadsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpageunreferencedtime,
prometheus.GaugeValue,
data.BufManExtensionPageUnreferencedTime,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpagewrites,
prometheus.CounterValue,
data.BufManExtensionPageWritesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManFreeliststalls,
prometheus.CounterValue,
data.BufManFreeListStallsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManIntegralControllerSlope,
prometheus.GaugeValue,
data.BufManIntegralControllerSlope,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManLazywrites,
prometheus.CounterValue,
data.BufManLazyWritesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManPagelifeexpectancy,
prometheus.GaugeValue,
data.BufManPageLifeExpectancy,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManPagelookups,
prometheus.CounterValue,
data.BufManPageLookupsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManPagereads,
prometheus.CounterValue,
data.BufManPageReadsPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManPagewrites,
prometheus.CounterValue,
data.BufManPageWritesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManReadaheadpages,
prometheus.CounterValue,
data.BufManReadaheadPagesPerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManReadaheadtime,
prometheus.CounterValue,
data.BufManReadaheadTimePerSec,
sqlInstance,
sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.bufManTargetpages,
prometheus.GaugeValue,
data.BufManTargetPages,
sqlInstance,
sqlInstance.name,
)
}
@@ -25,8 +25,8 @@ import (
|
||||
)
|
||||
|
||||
type collectorDatabases struct {
|
||||
databasesPerfDataCollectors map[string]*pdh.Collector
|
||||
databasesPerfDataCollectors2019 map[string]*pdh.Collector
|
||||
databasesPerfDataCollectors map[mssqlInstance]*pdh.Collector
|
||||
databasesPerfDataCollectors2019 map[mssqlInstance]*pdh.Collector
|
||||
databasesPerfDataObject []perfDataCounterValuesDatabases
|
||||
databasesPerfDataObject2019 []perfDataCounterValuesDatabases2019
|
||||
|
||||
@@ -93,7 +93,7 @@ type perfDataCounterValuesDatabases struct {
|
||||
DatabasesGroupCommitTimePerSec float64 `perfdata:"Group Commit Time/sec"`
|
||||
DatabasesLogBytesFlushedPerSec float64 `perfdata:"Log Bytes Flushed/sec"`
|
||||
DatabasesLogCacheHitRatio float64 `perfdata:"Log Cache Hit Ratio"`
|
||||
DatabasesLogCacheHitRatioBase float64 `perfdata:"Log Cache Hit Ratio Base,secondvalue"`
|
||||
DatabasesLogCacheHitRatioBase float64 `perfdata:"Log Cache Hit Ratio,secondvalue"`
|
||||
DatabasesLogCacheReadsPerSec float64 `perfdata:"Log Cache Reads/sec"`
|
||||
DatabasesLogFilesSizeKB float64 `perfdata:"Log File(s) Size (KB)"`
|
||||
DatabasesLogFilesUsedSizeKB float64 `perfdata:"Log File(s) Used Size (KB)"`
|
||||
@@ -141,18 +141,18 @@ type perfDataCounterValuesDatabases2019 struct {
|
||||
func (c *Collector) buildDatabases() error {
|
||||
var err error
|
||||
|
||||
c.databasesPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
|
||||
c.databasesPerfDataCollectors2019 = make(map[string]*pdh.Collector, len(c.mssqlInstances))
|
||||
c.databasesPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
|
||||
c.databasesPerfDataCollectors2019 = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
|
||||
errs := make([]error, 0, len(c.mssqlInstances))
|
||||
|
||||
for _, sqlInstance := range c.mssqlInstances {
|
||||
c.databasesPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll)
|
||||
c.databasesPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance.name, err))
|
||||
}
|
||||
|
||||
if sqlInstance.isVersionGreaterOrEqualThan(serverVersion2019) {
|
||||
c.databasesPerfDataCollectors2019[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll)
|
||||
c.databasesPerfDataCollectors2019[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to create Databases 2019 collector for instance %s: %w", sqlInstance.name, err))
|
||||
}
|
||||
@@ -458,7 +458,7 @@ func (c *Collector) collectDatabases(ch chan<- prometheus.Metric) error {
|
||||
)
|
||||
}
|
||||
|
||||
func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
|
||||
func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
|
||||
err := perfDataCollector.Collect(&c.databasesPerfDataObject)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)
|
||||
@@ -469,336 +469,336 @@ func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlIns
|
||||
c.databasesActiveTransactions,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesActiveTransactions,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesBackupPerRestoreThroughput,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesBackupPerRestoreThroughputPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesBulkCopyRows,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesBulkCopyRowsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesBulkCopyThroughput,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesBulkCopyThroughputPerSec*1024,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesCommitTableEntries,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesCommitTableEntries,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesDataFilesSizeKB,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesDataFilesSizeKB*1024,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesDBCCLogicalScanBytes,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesDBCCLogicalScanBytesPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesGroupCommitTime,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesGroupCommitTimePerSec/1000000.0,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogBytesFlushed,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogBytesFlushedPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogCacheHits,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogCacheHitRatio,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogCacheLookups,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogCacheHitRatioBase,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogCacheReads,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogCacheReadsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogFilesSizeKB,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogFilesSizeKB*1024,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogFilesUsedSizeKB,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogFilesUsedSizeKB*1024,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogFlushes,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogFlushesPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogFlushWaits,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogFlushWaitsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogFlushWaitTime,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogFlushWaitTime/1000.0,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogFlushWriteTimeMS,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogFlushWriteTimeMS/1000.0,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogGrowths,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogGrowths,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolCacheMisses,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolCacheMissesPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolDiskReads,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolDiskReadsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolHashDeletes,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolHashDeletesPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolHashInserts,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolHashInsertsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolInvalidHashEntry,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolInvalidHashEntryPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolLogScanPushes,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolLogScanPushesPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolLogWriterPushes,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolLogWriterPushesPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolPushEmptyFreePool,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolPushEmptyFreePoolPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolPushLowMemory,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolPushLowMemoryPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolPushNoFreeBuffer,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolPushNoFreeBufferPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolReqBehindTrunc,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolReqBehindTruncPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolRequestsOldVLF,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolRequestsOldVLFPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolRequests,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesLogPoolRequestsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolTotalActiveLogSize,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogPoolTotalActiveLogSize,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogPoolTotalSharedPoolSize,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogPoolTotalSharedPoolSize,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogShrinks,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogShrinks,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesLogTruncations,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesLogTruncations,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesPercentLogUsed,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesPercentLogUsed,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesReplPendingXacts,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesReplPendingXacts,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesReplTransRate,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesReplTransRate,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesShrinkDataMovementBytes,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesShrinkDataMovementBytesPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesTrackedTransactions,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesTrackedTransactionsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesTransactions,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesTransactionsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesWriteTransactions,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesWriteTransactionsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesXTPControllerDLCLatencyPerFetch,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesXTPControllerDLCLatencyPerFetch,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesXTPControllerDLCPeakLatency,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesXTPControllerDLCPeakLatency*1000000.0,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesXTPControllerLogProcessed,
|
||||
prometheus.CounterValue,
|
||||
data.DatabasesXTPControllerLogProcessedPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.databasesXTPMemoryUsedKB,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesXTPMemoryUsedKB*1024,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
|
||||
func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
|
||||
err := perfDataCollector.Collect(&c.databasesPerfDataObject2019)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)
|
||||
@@ -809,7 +809,7 @@ func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sq
|
||||
c.databasesActiveParallelRedoThreads,
|
||||
prometheus.GaugeValue,
|
||||
data.DatabasesActiveParallelRedoThreads,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
type collectorDatabaseReplica struct {
|
||||
dbReplicaPerfDataCollectors map[string]*pdh.Collector
|
||||
dbReplicaPerfDataCollectors map[mssqlInstance]*pdh.Collector
|
||||
dbReplicaPerfDataObject []perfDataCounterValuesDBReplica
|
||||
|
||||
dbReplicaDatabaseFlowControlDelay *prometheus.Desc
|
||||
@@ -86,11 +86,11 @@ type perfDataCounterValuesDBReplica struct {
|
||||
func (c *Collector) buildDatabaseReplica() error {
|
||||
var err error
|
||||
|
||||
c.dbReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
|
||||
c.dbReplicaPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
|
||||
errs := make([]error, 0, len(c.mssqlInstances))
|
||||
|
||||
for _, sqlInstance := range c.mssqlInstances {
|
||||
c.dbReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDBReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Database Replica"), pdh.InstancesAll)
|
||||
c.dbReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDBReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), pdh.InstancesAll)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance.name, err))
|
||||
}
|
||||
@@ -249,7 +249,7 @@ func (c *Collector) collectDatabaseReplica(ch chan<- prometheus.Metric) error {
|
||||
return c.collect(ch, subCollectorDatabaseReplica, c.dbReplicaPerfDataCollectors, c.collectDatabaseReplicaInstance)
|
||||
}
|
||||
|
||||
func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
|
||||
func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
|
||||
err := perfDataCollector.Collect(&c.dbReplicaPerfDataObject)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), err)
|
||||
@@ -260,168 +260,168 @@ func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric,
c.dbReplicaDatabaseFlowControlDelay,
prometheus.GaugeValue,
data.DbReplicaDatabaseFlowControlDelay,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaDatabaseFlowControls,
prometheus.CounterValue,
data.DbReplicaDatabaseFlowControlsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaFileBytesReceived,
prometheus.CounterValue,
data.DbReplicaFileBytesReceivedPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaGroupCommits,
prometheus.CounterValue,
data.DbReplicaGroupCommitsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaGroupCommitTime,
prometheus.GaugeValue,
data.DbReplicaGroupCommitTime,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogApplyPendingQueue,
prometheus.GaugeValue,
data.DbReplicaLogApplyPendingQueue,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogApplyReadyQueue,
prometheus.GaugeValue,
data.DbReplicaLogApplyReadyQueue,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesCompressed,
prometheus.CounterValue,
data.DbReplicaLogBytesCompressedPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesDecompressed,
prometheus.CounterValue,
data.DbReplicaLogBytesDecompressedPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesReceived,
prometheus.CounterValue,
data.DbReplicaLogBytesReceivedPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressionCachehits,
prometheus.CounterValue,
data.DbReplicaLogCompressionCacheHitsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressionCachemisses,
prometheus.CounterValue,
data.DbReplicaLogCompressionCacheMissesPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressions,
prometheus.CounterValue,
data.DbReplicaLogCompressionsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogDecompressions,
prometheus.CounterValue,
data.DbReplicaLogDecompressionsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogremainingforundo,
prometheus.GaugeValue,
data.DbReplicaLogRemainingForUndo,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogSendQueue,
prometheus.GaugeValue,
data.DbReplicaLogSendQueue,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaMirroredWritetransactions,
prometheus.CounterValue,
data.DbReplicaMirroredWriteTransactionsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaRecoveryQueue,
prometheus.GaugeValue,
data.DbReplicaRecoveryQueue,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoblocked,
prometheus.CounterValue,
data.DbReplicaRedoBlockedPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoBytesRemaining,
prometheus.GaugeValue,
data.DbReplicaRedoBytesRemaining,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoneBytes,
prometheus.CounterValue,
data.DbReplicaRedoneBytesPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedones,
prometheus.CounterValue,
data.DbReplicaRedonesPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaTotalLogrequiringundo,
prometheus.GaugeValue,
data.DbReplicaTotalLogRequiringUndo,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.dbReplicaTransactionDelay,
prometheus.GaugeValue,
data.DbReplicaTransactionDelay/1000.0,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)
}
@@ -25,7 +25,7 @@ import (
)

type collectorGeneralStatistics struct {
-	genStatsPerfDataCollectors map[string]*pdh.Collector
+	genStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector
genStatsPerfDataObject []perfDataCounterValuesGenStats

genStatsActiveTempTables *prometheus.Desc
@@ -84,11 +84,11 @@ type perfDataCounterValuesGenStats struct {
func (c *Collector) buildGeneralStatistics() error {
var err error

-	c.genStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.genStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
-	c.genStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesGenStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "General Statistics"), nil)
+	c.genStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesGenStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create General Statistics collector for instance %s: %w", sqlInstance.name, err))
}
@@ -247,7 +247,7 @@ func (c *Collector) collectGeneralStatistics(ch chan<- prometheus.Metric) error
return c.collect(ch, subCollectorGeneralStatistics, c.genStatsPerfDataCollectors, c.collectGeneralStatisticsInstance)
}

-	func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+	func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.genStatsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), err)
@@ -257,168 +257,168 @@ func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric
c.genStatsActiveTempTables,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsActiveTempTables,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsConnectionReset,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsConnectionResetPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsEventNotificationsDelayedDrop,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsEventNotificationsDelayedDrop,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsHTTPAuthenticatedRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsHTTPAuthenticatedRequests,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsLogicalConnections,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsLogicalConnections,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsLogins,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsLoginsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsLogouts,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsLogoutsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsMarsDeadlocks,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsMarsDeadlocks,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsNonAtomicYieldRate,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsNonatomicYieldRate,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsProcessesBlocked,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsProcessesBlocked,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPEmptyRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPEmptyRequests,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPMethodInvocations,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPMethodInvocations,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSessionInitiateRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPSessionInitiateRequests,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSessionTerminateRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPSessionTerminateRequests,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSQLRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPSQLRequests,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPWSDLRequests,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSOAPWSDLRequests,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsSQLTraceIOProviderLockWaits,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsSQLTraceIOProviderLockWaits,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsTempDBRecoveryUnitID,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTempdbRecoveryUnitID,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsTempDBrowSetID,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTempdbRowsetID,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsTempTablesCreationRate,
prometheus.CounterValue,
c.genStatsPerfDataObject[0].GenStatsTempTablesCreationRate,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsTempTablesForDestruction,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTempTablesForDestruction,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsTraceEventNotificationQueue,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTraceEventNotificationQueue,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsTransactions,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsTransactions,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.genStatsUserConnections,
prometheus.GaugeValue,
c.genStatsPerfDataObject[0].GenStatsUserConnections,
-	sqlInstance,
+	sqlInstance.name,
)

return nil
@@ -25,7 +25,7 @@ import (
)

type collectorLocks struct {
-	locksPerfDataCollectors map[string]*pdh.Collector
+	locksPerfDataCollectors map[mssqlInstance]*pdh.Collector
locksPerfDataObject []perfDataCounterValuesLocks

// Win32_PerfRawData_{instance}_SQLServerLocks
@@ -43,7 +43,7 @@ type perfDataCounterValuesLocks struct {
Name string

LocksAverageWaitTimeMS float64 `perfdata:"Average Wait Time (ms)"`
-	LocksAverageWaitTimeMSBase float64 `perfdata:"Average Wait Time Base,secondvalue"`
+	LocksAverageWaitTimeMSBase float64 `perfdata:"Average Wait Time (ms),secondvalue"`
LocksLockRequestsPerSec float64 `perfdata:"Lock Requests/sec"`
LocksLockTimeoutsPerSec float64 `perfdata:"Lock Timeouts/sec"`
LocksLockTimeoutsTimeout0PerSec float64 `perfdata:"Lock Timeouts (timeout > 0)/sec"`
@@ -55,11 +55,11 @@ type perfDataCounterValuesLocks struct {
func (c *Collector) buildLocks() error {
var err error

-	c.locksPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.locksPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
-	c.locksPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesLocks](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Locks"), pdh.InstancesAll)
+	c.locksPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesLocks](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Locks"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance.name, err))
}
@@ -121,7 +121,7 @@ func (c *Collector) collectLocks(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorLocks, c.locksPerfDataCollectors, c.collectLocksInstance)
}

-	func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+	func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.locksPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Locks"), err)
@@ -132,56 +132,56 @@ func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstanc
c.locksWaitTime,
prometheus.GaugeValue,
data.LocksAverageWaitTimeMS/1000.0,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.locksCount,
prometheus.GaugeValue,
data.LocksAverageWaitTimeMSBase/1000.0,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.locksLockRequests,
prometheus.CounterValue,
data.LocksLockRequestsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.locksLockTimeouts,
prometheus.CounterValue,
data.LocksLockTimeoutsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.locksLockTimeoutstimeout0,
prometheus.CounterValue,
data.LocksLockTimeoutsTimeout0PerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.locksLockWaits,
prometheus.CounterValue,
data.LocksLockWaitsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.locksLockWaitTimeMS,
prometheus.GaugeValue,
data.LocksLockWaitTimeMS/1000.0,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.locksNumberOfDeadlocks,
prometheus.CounterValue,
data.LocksNumberOfDeadlocksPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)
}
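The `perfdata:"..."` annotations corrected above are ordinary Go struct tags; the pdh package presumably resolves counter names to fields through reflection, which is why the tag text must match the Windows counter name exactly. A minimal sketch of the tag mechanism itself, assuming nothing about pdh internals:

```go
package main

import (
	"fmt"
	"reflect"
)

type counters struct {
	// The tag value names the Windows performance counter backing this field.
	LocksAverageWaitTimeMS float64 `perfdata:"Average Wait Time (ms)"`
}

func main() {
	f, _ := reflect.TypeOf(counters{}).FieldByName("LocksAverageWaitTimeMS")
	fmt.Println(f.Tag.Get("perfdata")) // Average Wait Time (ms)
}
```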
@@ -25,7 +25,7 @@ import (
)

type collectorMemoryManager struct {
-	memMgrPerfDataCollectors map[string]*pdh.Collector
+	memMgrPerfDataCollectors map[mssqlInstance]*pdh.Collector
memMgrPerfDataObject []perfDataCounterValuesMemMgr

memMgrConnectionMemoryKB *prometheus.Desc
@@ -76,11 +76,11 @@ type perfDataCounterValuesMemMgr struct {
func (c *Collector) buildMemoryManager() error {
var err error

-	c.memMgrPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.memMgrPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
-	c.memMgrPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesMemMgr](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Memory Manager"), pdh.InstancesAll)
+	c.memMgrPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesMemMgr](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Memory Manager collector for instance %s: %w", sqlInstance.name, err))
}
@@ -214,7 +214,7 @@ func (c *Collector) collectMemoryManager(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorMemoryManager, c.memMgrPerfDataCollectors, c.collectMemoryManagerInstance)
}

-	func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+	func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.memMgrPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), err)
@@ -224,140 +224,140 @@ func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sq
c.memMgrConnectionMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrConnectionMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrDatabaseCacheMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrDatabaseCacheMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrExternalBenefitOfMemory,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrExternalBenefitOfMemory,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrFreeMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrFreeMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrGrantedWorkspaceMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrGrantedWorkspaceMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrLockBlocks,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockBlocks,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrLockBlocksAllocated,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockBlocksAllocated,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrLockMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrLockOwnerBlocks,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocks,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrLockOwnerBlocksAllocated,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocksAllocated,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrLogPoolMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrLogPoolMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrMaximumWorkspaceMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrMaximumWorkspaceMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrMemoryGrantsOutstanding,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrMemoryGrantsOutstanding,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrMemoryGrantsPending,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrMemoryGrantsPending,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrOptimizerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrOptimizerMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrReservedServerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrReservedServerMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrSQLCacheMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrSQLCacheMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrStolenServerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrStolenServerMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrTargetServerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrTargetServerMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.memMgrTotalServerMemoryKB,
prometheus.GaugeValue,
c.memMgrPerfDataObject[0].MemMgrTotalServerMemoryKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

return nil
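The `*1024` factors above normalize counters reported in KB to bytes, keeping exported metrics in Prometheus base units. A minimal, self-contained sketch of the same pattern with client_golang; the descriptor name and values here are made up for illustration, not taken from the exporter:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Hypothetical descriptor; the real exporter builds its own in buildMemoryManager.
var freeBytesDesc = prometheus.NewDesc(
	"windows_mssql_memmgr_free_memory_bytes",
	"Free SQL Server memory, in bytes.",
	[]string{"mssql_instance"}, nil,
)

func emit(ch chan<- prometheus.Metric, freeKB float64, instance string) {
	// The underlying counter is in KB; multiply by 1024 so the metric is in bytes.
	ch <- prometheus.MustNewConstMetric(freeBytesDesc, prometheus.GaugeValue, freeKB*1024, instance)
}

func main() {
	ch := make(chan prometheus.Metric, 1)
	emit(ch, 2048, "MSSQLSERVER")
	fmt.Println((<-ch).Desc())
}
```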
@@ -25,7 +25,7 @@ import (
)

type collectorSQLErrors struct {
-	sqlErrorsPerfDataCollectors map[string]*pdh.Collector
+	sqlErrorsPerfDataCollectors map[mssqlInstance]*pdh.Collector
sqlErrorsPerfDataObject []perfDataCounterValuesSqlErrors

// Win32_PerfRawData_{instance}_SQLServerSQLErrors
@@ -41,11 +41,11 @@ type perfDataCounterValuesSqlErrors struct {
func (c *Collector) buildSQLErrors() error {
var err error

-	c.sqlErrorsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.sqlErrorsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
-	c.sqlErrorsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Errors"), pdh.InstancesAll)
+	c.sqlErrorsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance.name, err))
}
@@ -66,7 +66,7 @@ func (c *Collector) collectSQLErrors(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorSQLErrors, c.sqlErrorsPerfDataCollectors, c.collectSQLErrorsInstance)
}

-	func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+	func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.sqlErrorsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), err)
@@ -77,7 +77,7 @@ func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlIns
c.sqlErrorsTotal,
prometheus.CounterValue,
data.SqlErrorsErrorsPerSec,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)
}
@@ -25,7 +25,7 @@ import (
)

type collectorSQLStats struct {
-	sqlStatsPerfDataCollectors map[string]*pdh.Collector
+	sqlStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector
sqlStatsPerfDataObject []perfDataCounterValuesSqlStats

sqlStatsAutoParamAttempts *prometheus.Desc
@@ -58,11 +58,11 @@ type perfDataCounterValuesSqlStats struct {
func (c *Collector) buildSQLStats() error {
var err error

-	c.sqlStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.sqlStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
-	c.sqlStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Statistics"), nil)
+	c.sqlStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create SQL Statistics collector for instance %s: %w", sqlInstance.name, err))
}
@@ -142,7 +142,7 @@ func (c *Collector) collectSQLStats(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorSQLStats, c.sqlStatsPerfDataCollectors, c.collectSQLStatsInstance)
}

-	func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+	func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.sqlStatsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), err)
@@ -152,77 +152,77 @@ func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInst
c.sqlStatsAutoParamAttempts,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsAutoParamAttemptsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsBatchRequests,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsBatchRequestsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsFailedAutoParams,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsFailedAutoParamsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsForcedParameterizations,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsForcedParameterizationsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsGuidedplanexecutions,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsGuidedplanexecutionsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsMisguidedplanexecutions,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsMisguidedplanexecutionsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsSafeAutoParams,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsSafeAutoParamsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLAttentionrate,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsSQLAttentionrate,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLCompilations,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsSQLCompilationsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLReCompilations,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsSQLReCompilationsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.sqlStatsUnsafeAutoParams,
prometheus.CounterValue,
c.sqlStatsPerfDataObject[0].SqlStatsUnsafeAutoParamsPerSec,
-	sqlInstance,
+	sqlInstance.name,
)

return nil
@@ -25,7 +25,7 @@ import (
)

type collectorTransactions struct {
-	transactionsPerfDataCollectors map[string]*pdh.Collector
+	transactionsPerfDataCollectors map[mssqlInstance]*pdh.Collector
transactionsPerfDataObject []perfDataCounterValuesTransactions

transactionsTempDbFreeSpaceBytes *prometheus.Desc
@@ -62,11 +62,11 @@ type perfDataCounterValuesTransactions struct {
func (c *Collector) buildTransactions() error {
var err error

-	c.transactionsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.transactionsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
-	c.transactionsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesTransactions](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Transactions"), nil)
+	c.transactionsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesTransactions](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Transactions collector for instance %s: %w", sqlInstance.name, err))
}
@@ -160,7 +160,7 @@ func (c *Collector) collectTransactions(ch chan<- prometheus.Metric) error {

// Win32_PerfRawData_MSSQLSERVER_Transactions docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
-	func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+	func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.transactionsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), err)
@@ -170,91 +170,91 @@ func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sql
c.transactionsTempDbFreeSpaceBytes,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsFreeSpaceintempdbKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsLongestTransactionRunningSeconds,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsLongestTransactionRunningTime,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsNonSnapshotVersionActiveTotal,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsNonSnapshotVersionTransactions,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsSnapshotActiveTotal,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsSnapshotTransactions,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsActive,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsTransactions,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsUpdateConflictsTotal,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsUpdateconflictratio,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsUpdateSnapshotActiveTotal,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsUpdateSnapshotTransactions,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsVersionCleanupRateBytes,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsVersionCleanuprateKBPers*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsVersionGenerationRateBytes,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsVersionGenerationrateKBPers*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreSizeBytes,
prometheus.GaugeValue,
c.transactionsPerfDataObject[0].TransactionsVersionStoreSizeKB*1024,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreUnits,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcount,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreCreationUnits,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcreation,
-	sqlInstance,
+	sqlInstance.name,
)

ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreTruncationUnits,
prometheus.CounterValue,
c.transactionsPerfDataObject[0].TransactionsVersionStoreunittruncation,
-	sqlInstance,
+	sqlInstance.name,
)

return nil
@@ -25,7 +25,7 @@ import (
)

type collectorWaitStats struct {
-	waitStatsPerfDataCollectors map[string]*pdh.Collector
+	waitStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector
waitStatsPerfDataObject []perfDataCounterValuesWaitStats

waitStatsLockWaits *prometheus.Desc
@@ -62,11 +62,11 @@ type perfDataCounterValuesWaitStats struct {
func (c *Collector) buildWaitStats() error {
var err error

-	c.waitStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.waitStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))

for _, sqlInstance := range c.mssqlInstances {
-	c.waitStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesWaitStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Wait Statistics"), pdh.InstancesAll)
+	c.waitStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesWaitStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Wait Statistics collector for instance %s: %w", sqlInstance.name, err))
}
@@ -153,7 +153,7 @@ func (c *Collector) collectWaitStats(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorWaitStats, c.waitStatsPerfDataCollectors, c.collectWaitStatsInstance)
}

-	func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+	func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
err := perfDataCollector.Collect(&c.waitStatsPerfDataObject)
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), err)
@@ -164,84 +164,84 @@ func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlIns
c.waitStatsLockWaits,
prometheus.CounterValue,
data.WaitStatsLockWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsMemoryGrantQueueWaits,
prometheus.CounterValue,
data.WaitStatsMemoryGrantQueueWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsThreadSafeMemoryObjectsWaits,
prometheus.CounterValue,
data.WaitStatsThreadSafeMemoryObjectsWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsLogWriteWaits,
prometheus.CounterValue,
data.WaitStatsLogWriteWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsLogBufferWaits,
prometheus.CounterValue,
data.WaitStatsLogBufferWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsNetworkIOWaits,
prometheus.CounterValue,
data.WaitStatsNetworkIOWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsPageIOLatchWaits,
prometheus.CounterValue,
data.WaitStatsPageIOLatchWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsPageLatchWaits,
prometheus.CounterValue,
data.WaitStatsPageLatchWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsNonPageLatchWaits,
prometheus.CounterValue,
data.WaitStatsNonpageLatchWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsWaitForTheWorkerWaits,
prometheus.CounterValue,
data.WaitStatsWaitForTheWorkerWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsWorkspaceSynchronizationWaits,
prometheus.CounterValue,
data.WaitStatsWorkspaceSynchronizationWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)

ch <- prometheus.MustNewConstMetric(
c.waitStatsTransactionOwnershipWaits,
prometheus.CounterValue,
data.WaitStatsTransactionOwnershipWaits,
-	sqlInstance, data.Name,
+	sqlInstance.name, data.Name,
)
}
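Every sub-collector above funnels through the same c.collect(ch, name, collectors, fn) helper. Its exact signature is not shown in this diff, so the following is only a guess at the dispatch pattern it implies: walk the per-instance collector map and call the per-instance collect function, accumulating errors rather than stopping at the first one.

```go
// Sketch only; the real helper's name, signature, and error handling may differ.
func (c *Collector) collectAll(
	ch chan<- prometheus.Metric,
	collectors map[mssqlInstance]*pdh.Collector,
	fn func(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error,
) error {
	errs := make([]error, 0, len(collectors))

	// One failing instance should not suppress metrics from the others.
	for sqlInstance, collector := range collectors {
		if err := fn(ch, sqlInstance, collector); err != nil {
			errs = append(errs, err)
		}
	}

	return errors.Join(errs...)
}
```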
@@ -8,13 +8,14 @@ import (
)

type mssqlInstance struct {
-	name string
-	majorVersion mssqlServerMajorVersion
-	patchVersion string
-	edition string
+	name string
+	majorVersion mssqlServerMajorVersion
+	patchVersion string
+	edition string
+	isFirstInstance bool
}

-	func newMssqlInstance(name string) (mssqlInstance, error) {
+	func newMssqlInstance(key, name string) (mssqlInstance, error) {
regKey := fmt.Sprintf(`Software\Microsoft\Microsoft SQL Server\%s\Setup`, name)

k, err := registry.OpenKey(registry.LOCAL_MACHINE, regKey, registry.QUERY_VALUE)
@@ -26,7 +27,7 @@ func newMssqlInstance(name string) (mssqlInstance, error) {
_ = key.Close()
}(k)

-	patchVersion, _, err := k.GetStringValue("Version")
+	patchVersion, _, err := k.GetStringValue("PatchLevel")
if err != nil {
return mssqlInstance{}, fmt.Errorf("couldn't get version from registry: %w", err)
}
@@ -39,10 +40,11 @@ func newMssqlInstance(name string) (mssqlInstance, error) {
_, name, _ = strings.Cut(name, ".")

return mssqlInstance{
-	edition: edition,
-	name: name,
-	majorVersion: newMajorVersion(patchVersion),
-	patchVersion: patchVersion,
+	edition: edition,
+	name: name,
+	majorVersion: newMajorVersion(patchVersion),
+	patchVersion: patchVersion,
+	isFirstInstance: key == "MSSQLSERVER",
}, nil
}
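The GetStringValue change above switches the version lookup from the "Version" value to "PatchLevel" under the instance's Setup key. A minimal standalone sketch of that registry access with golang.org/x/sys/windows/registry; the instance ID in the path is a placeholder, not a value taken from this diff:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Hypothetical instance ID; real keys live under IDs such as MSSQL16.MSSQLSERVER.
	k, err := registry.OpenKey(registry.LOCAL_MACHINE,
		`Software\Microsoft\Microsoft SQL Server\MSSQL16.MSSQLSERVER\Setup`, registry.QUERY_VALUE)
	if err != nil {
		panic(err)
	}
	defer k.Close()

	// "PatchLevel" reports the fully patched build (e.g. 16.0.4095.4),
	// which is presumably why the diff prefers it over "Version".
	patchLevel, _, err := k.GetStringValue("PatchLevel")
	if err != nil {
		panic(err)
	}

	fmt.Println(patchLevel)
}
```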
@@ -33,7 +33,12 @@ import (
"golang.org/x/sys/windows"
)

-	const Name = "net"
+	const (
+	Name = "net"
+
+	subCollectorMetrics = "metrics"
+	subCollectorNicInfo = "nic_addresses"
+	)

type Config struct {
NicExclude *regexp.Regexp `yaml:"nic_exclude"`
@@ -46,8 +51,8 @@ var ConfigDefaults = Config{
NicExclude: types.RegExpEmpty,
NicInclude: types.RegExpAny,
CollectorsEnabled: []string{
-	"metrics",
-	"nic_addresses",
+	subCollectorMetrics,
+	subCollectorNicInfo,
},
}

@@ -157,17 +162,12 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Network Interface", pdh.InstancesAll)
-	if err != nil {
-	return fmt.Errorf("failed to create Network Interface collector: %w", err)
-	}
-
-	if slices.Contains(c.config.CollectorsEnabled, "addresses") {
-	logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.",
-	slog.String("collector", Name),
-	)
+	for _, collector := range c.config.CollectorsEnabled {
+	if !slices.Contains([]string{subCollectorMetrics, subCollectorNicInfo}, collector) {
+	return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
+	strings.Join([]string{subCollectorMetrics, subCollectorNicInfo}, ", "),
+	)
+	}
+	}

c.bytesReceivedTotal = prometheus.NewDesc(
@@ -261,21 +261,34 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil,
)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Network Interface", pdh.InstancesAll)
+	if err != nil {
+	return fmt.Errorf("failed to create Network Interface collector: %w", err)
+	}
+
+	if slices.Contains(c.config.CollectorsEnabled, subCollectorNicInfo) {
+	logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.",
+	slog.String("collector", Name),
+	)
+	}
+
return nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-	errs := make([]error, 0, 2)
+	errs := make([]error, 0)

-	if slices.Contains(c.config.CollectorsEnabled, "metrics") {
+	if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
if err := c.collect(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting metrics: %w", err))
}
}

-	if slices.Contains(c.config.CollectorsEnabled, "nic_addresses") {
+	if slices.Contains(c.config.CollectorsEnabled, subCollectorNicInfo) {
if err := c.collectNICAddresses(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting net addresses: %w", err))
}
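The Build change above replaces scattered string literals with named constants and adds a fail-fast check: notably, the old code looked for "addresses" while the default config registers "nic_addresses", so the experimental-state warning could never fire. A detached restatement of the validation loop the diff adds, using only the standard library:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

// validate mirrors the fail-fast check added in Build: any enabled name
// outside the known set is rejected at startup instead of silently ignored.
func validate(enabled, known []string) error {
	for _, name := range enabled {
		if !slices.Contains(known, name) {
			return fmt.Errorf("unknown sub collector: %s. Possible values: %s", name, strings.Join(known, ", "))
		}
	}

	return nil
}

func main() {
	known := []string{"metrics", "nic_addresses"}
	fmt.Println(validate([]string{"metrics", "addresses"}, known)) // rejects the typo
}
```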
@@ -26,25 +26,25 @@ import (

func (c *Collector) buildClrExceptions() {
c.numberOfExceptionsThrown = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "exceptions_thrown_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrExceptions+"_exceptions_thrown_total"),
"Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.",
[]string{"process"},
nil,
)
c.numberOfFilters = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "exceptions_filters_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrExceptions+"_exceptions_filters_total"),
"Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled.",
[]string{"process"},
nil,
)
c.numberOfFinally = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "exceptions_finallys_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrExceptions+"_exceptions_finallys_total"),
"Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter.",
[]string{"process"},
nil,
)
c.throwToCatchDepth = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "throw_to_catch_depth_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrExceptions+"_throw_to_catch_depth_total"),
"Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception.",
[]string{"process"},
nil,
@@ -63,7 +63,7 @@ type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {

func (c *Collector) collectClrExceptions(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
-	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRExceptions"))); err != nil {
+	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRExceptions"))); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}
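The renames above push each sub-collector's name into the metric via prometheus.BuildFQName, which simply joins namespace, subsystem, and name with underscores. A small sketch; the constant values are assumptions based on the surrounding code, not taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Assumed values: types.Namespace is presumably "windows" and Name "netframework".
	const namespace, subsystem = "windows", "netframework"
	const collectorClrExceptions = "clrexceptions" // hypothetical constant value

	fmt.Println(prometheus.BuildFQName(namespace, subsystem, collectorClrExceptions+"_exceptions_thrown_total"))
	// windows_netframework_clrexceptions_exceptions_thrown_total
}
```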
@@ -26,19 +26,19 @@ import (

func (c *Collector) buildClrInterop() {
c.numberOfCCWs = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "com_callable_wrappers_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrInterop+"_com_callable_wrappers_total"),
"Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.",
[]string{"process"},
nil,
)
c.numberOfMarshalling = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "interop_marshalling_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrInterop+"_interop_marshalling_total"),
"Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started.",
[]string{"process"},
nil,
)
c.numberOfStubs = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "interop_stubs_created_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrInterop+"_interop_stubs_created_total"),
"Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call.",
[]string{"process"},
nil,
@@ -57,7 +57,7 @@ type Win32_PerfRawData_NETFramework_NETCLRInterop struct {

func (c *Collector) collectClrInterop(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
-	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRInterop"))); err != nil {
+	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRInterop"))); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}
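The same one-word fix recurs in each collect function here and below: WQL, like SQL, requires a FROM clause, so "SELECT * Win32_..." was simply an invalid query. A hedged sketch of keeping such query strings obviously well-formed; the helper is illustrative, not the exporter's API:

```go
package main

import "fmt"

// wqlSelectAll builds a "SELECT * FROM <class>" query so the FROM keyword
// can never be dropped at individual call sites.
func wqlSelectAll(class string) string {
	return fmt.Sprintf("SELECT * FROM %s", class)
}

func main() {
	fmt.Println(wqlSelectAll("Win32_PerfRawData_NETFramework_NETCLRInterop"))
}
```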
@@ -26,25 +26,25 @@ import (

func (c *Collector) buildClrJIT() {
c.numberOfMethodsJitted = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "jit_methods_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrJIT+"_jit_methods_total"),
"Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.",
[]string{"process"},
nil,
)
c.timeInJit = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "jit_time_percent"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrJIT+"_jit_time_percent"),
"Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled.",
[]string{"process"},
nil,
)
c.standardJitFailures = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "jit_standard_failures_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrJIT+"_jit_standard_failures_total"),
"Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler.",
[]string{"process"},
nil,
)
c.totalNumberOfILBytesJitted = prometheus.NewDesc(
-	prometheus.BuildFQName(types.Namespace, Name, "jit_il_bytes_total"),
+	prometheus.BuildFQName(types.Namespace, Name, collectorClrJIT+"_jit_il_bytes_total"),
"Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started",
[]string{"process"},
nil,
@@ -65,7 +65,7 @@ type Win32_PerfRawData_NETFramework_NETCLRJit struct {

func (c *Collector) collectClrJIT(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRJit
-	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRJit"))); err != nil {
+	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRJit"))); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}
@@ -26,55 +26,55 @@ import (
|
||||
|
||||
func (c *Collector) buildClrLoading() {
|
||||
c.bytesInLoaderHeap = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "loader_heap_size_bytes"),
|
||||
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_loader_heap_size_bytes"),
|
||||
"Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.currentAppDomains = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "appdomains_loaded_current"),
|
||||
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_appdomains_loaded_current"),
|
||||
"Displays the current number of application domains loaded in this application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.currentAssemblies = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "assemblies_loaded_current"),
|
||||
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_assemblies_loaded_current"),
|
||||
"Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.currentClassesLoaded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "classes_loaded_current"),
|
||||
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_classes_loaded_current"),
|
||||
"Displays the current number of classes loaded in all assemblies.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalAppDomains = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "appdomains_loaded_total"),
|
||||
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_appdomains_loaded_total"),
|
||||
"Displays the peak number of application domains loaded since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalAppDomainsUnloaded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "appdomains_unloaded_total"),
|
||||
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_appdomains_unloaded_total"),
|
||||
"Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalAssemblies = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "assemblies_loaded_total"),
|
||||
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_assemblies_loaded_total"),
|
||||
"Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalClassesLoaded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "classes_loaded_total"),
|
||||
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_classes_loaded_total"),
|
||||
"Displays the cumulative number of classes loaded in all assemblies since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalNumberOfLoadFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "class_load_failures_total"),
|
||||
prometheus.BuildFQName(types.Namespace, Name, collectorClrLoading+"_class_load_failures_total"),
|
||||
"Displays the peak number of classes that have failed to load since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
@@ -104,7 +104,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLoading struct {
|
||||
|
||||
func (c *Collector) collectClrLoading(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRLoading
|
||||
if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRLoading"))); err != nil {
|
||||
if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRLoading"))); err != nil {
|
||||
return fmt.Errorf("WMI query failed: %w", err)
|
||||
}
|
||||
|
||||
|
||||
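Editor's note: the hunk above and the four that follow fix the same bug, a WQL string missing its FROM keyword. mi.NewQuery apparently accepts the malformed string, so the mistake only surfaces at scrape time as a WMI failure. A hedged sketch of a guard test that would catch the typo early; the test name and query table are illustrative, not part of the repository:

package netframework_test

import (
	"strings"
	"testing"
)

// TestWQLQueriesContainFROM fails fast on WQL statements that lost their
// FROM keyword, instead of letting the error surface at query time.
func TestWQLQueriesContainFROM(t *testing.T) {
	queries := []string{
		"SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRLoading",
		"SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRMemory",
	}
	for _, q := range queries {
		if !strings.Contains(q, " FROM ") {
			t.Errorf("WQL query is missing its FROM keyword: %q", q)
		}
	}
}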
@@ -26,43 +26,43 @@ import (

func (c *Collector) buildClrLocksAndThreads() {
	c.currentQueueLength = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "current_queue_length"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_current_queue_length"),
		"Displays the total number of threads that are currently waiting to acquire a managed lock in the application.",
		[]string{"process"},
		nil,
	)
	c.numberOfCurrentLogicalThreads = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "current_logical_threads"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_current_logical_threads"),
		"Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads.",
		[]string{"process"},
		nil,
	)
	c.numberOfCurrentPhysicalThreads = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "physical_threads_current"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_physical_threads_current"),
		"Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process.",
		[]string{"process"},
		nil,
	)
	c.numberOfCurrentRecognizedThreads = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "recognized_threads_current"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_recognized_threads_current"),
		"Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
		[]string{"process"},
		nil,
	)
	c.numberOfTotalRecognizedThreads = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "recognized_threads_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_recognized_threads_total"),
		"Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
		[]string{"process"},
		nil,
	)
	c.queueLengthPeak = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "queue_length_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_queue_length_total"),
		"Displays the total number of threads that waited to acquire a managed lock since the application started.",
		[]string{"process"},
		nil,
	)
	c.totalNumberOfContentions = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "contentions_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrLocksAndThreads+"_contentions_total"),
		"Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully.",
		[]string{"process"},
		nil,

@@ -86,7 +86,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads struct {

func (c *Collector) collectClrLocksAndThreads(ch chan<- prometheus.Metric) error {
	var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
-	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads"))); err != nil {
+	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads"))); err != nil {
		return fmt.Errorf("WMI query failed: %w", err)
	}
@@ -26,73 +26,73 @@ import (

func (c *Collector) buildClrMemory() {
	c.allocatedBytes = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "allocated_bytes_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_allocated_bytes_total"),
		"Displays the total number of bytes allocated on the garbage collection heap.",
		[]string{"process"},
		nil,
	)
	c.finalizationSurvivors = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "finalization_survivors"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_finalization_survivors"),
		"Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized.",
		[]string{"process"},
		nil,
	)
	c.heapSize = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "heap_size_bytes"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_heap_size_bytes"),
		"Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated.",
		[]string{"process", "area"},
		nil,
	)
	c.promotedBytes = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "promoted_bytes"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_promoted_bytes"),
		"Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection.",
		[]string{"process", "area"},
		nil,
	)
	c.numberGCHandles = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "number_gc_handles"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_number_gc_handles"),
		"Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment.",
		[]string{"process"},
		nil,
	)
	c.numberCollections = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "collections_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_collections_total"),
		"Displays the number of times the generation objects are garbage collected since the application started.",
		[]string{"process", "area"},
		nil,
	)
	c.numberInducedGC = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "induced_gc_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_induced_gc_total"),
		"Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect.",
		[]string{"process"},
		nil,
	)
	c.numberOfPinnedObjects = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "number_pinned_objects"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_number_pinned_objects"),
		"Displays the number of pinned objects encountered in the last garbage collection.",
		[]string{"process"},
		nil,
	)
	c.numberOfSinkBlocksInUse = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "number_sink_blocksinuse"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_number_sink_blocksinuse"),
		"Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector.",
		[]string{"process"},
		nil,
	)
	c.numberTotalCommittedBytes = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "committed_bytes"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_committed_bytes"),
		"Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file.",
		[]string{"process"},
		nil,
	)
	c.numberTotalReservedBytes = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "reserved_bytes"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_reserved_bytes"),
		"Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used.",
		[]string{"process"},
		nil,
	)
	c.timeInGC = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "gc_time_percent"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrMemory+"_gc_time_percent"),
		"Displays the percentage of time that was spent performing a garbage collection in the last sample.",
		[]string{"process"},
		nil,

@@ -135,7 +135,7 @@ type Win32_PerfRawData_NETFramework_NETCLRMemory struct {

func (c *Collector) collectClrMemory(ch chan<- prometheus.Metric) error {
	var dst []Win32_PerfRawData_NETFramework_NETCLRMemory
-	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRMemory"))); err != nil {
+	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRMemory"))); err != nil {
		return fmt.Errorf("WMI query failed: %w", err)
	}
@@ -26,37 +26,37 @@ import (

func (c *Collector) buildClrRemoting() {
	c.channels = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "channels_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_channels_total"),
		"Displays the total number of remoting channels registered across all application domains since the application started.",
		[]string{"process"},
		nil,
	)
	c.contextBoundClassesLoaded = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "context_bound_classes_loaded"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_context_bound_classes_loaded"),
		"Displays the current number of context-bound classes that are loaded.",
		[]string{"process"},
		nil,
	)
	c.contextBoundObjects = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "context_bound_objects_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_context_bound_objects_total"),
		"Displays the total number of context-bound objects allocated.",
		[]string{"process"},
		nil,
	)
	c.contextProxies = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "context_proxies_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_context_proxies_total"),
		"Displays the total number of remoting proxy objects in this process since it started.",
		[]string{"process"},
		nil,
	)
	c.contexts = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "contexts"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_contexts"),
		"Displays the current number of remoting contexts in the application.",
		[]string{"process"},
		nil,
	)
	c.totalRemoteCalls = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "remote_calls_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrRemoting+"_remote_calls_total"),
		"Displays the total number of remote procedure calls invoked since the application started.",
		[]string{"process"},
		nil,

@@ -77,7 +77,7 @@ type Win32_PerfRawData_NETFramework_NETCLRRemoting struct {

func (c *Collector) collectClrRemoting(ch chan<- prometheus.Metric) error {
	var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting
-	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRRemoting"))); err != nil {
+	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRRemoting"))); err != nil {
		return fmt.Errorf("WMI query failed: %w", err)
	}
@@ -26,25 +26,25 @@ import (

func (c *Collector) buildClrSecurity() {
	c.numberLinkTimeChecks = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "link_time_checks_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrSecurity+"_link_time_checks_total"),
		"Displays the total number of link-time code access security checks since the application started.",
		[]string{"process"},
		nil,
	)
	c.timeInRTChecks = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "rt_checks_time_percent"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrSecurity+"_rt_checks_time_percent"),
		"Displays the percentage of time spent performing runtime code access security checks in the last sample.",
		[]string{"process"},
		nil,
	)
	c.stackWalkDepth = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "stack_walk_depth"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrSecurity+"_stack_walk_depth"),
		"Displays the depth of the stack during that last runtime code access security check.",
		[]string{"process"},
		nil,
	)
	c.totalRuntimeChecks = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "runtime_checks_total"),
+		prometheus.BuildFQName(types.Namespace, Name, collectorClrSecurity+"_runtime_checks_total"),
		"Displays the total number of runtime code access security checks performed since the application started.",
		[]string{"process"},
		nil,

@@ -64,7 +64,7 @@ type Win32_PerfRawData_NETFramework_NETCLRSecurity struct {

func (c *Collector) collectClrSecurity(ch chan<- prometheus.Metric) error {
	var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity
-	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * Win32_PerfRawData_NETFramework_NETCLRSecurity"))); err != nil {
+	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, utils.Must(mi.NewQuery("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRSecurity"))); err != nil {
		return fmt.Errorf("WMI query failed: %w", err)
	}
@@ -28,5 +28,7 @@ func BenchmarkCollector(b *testing.B) {
}

func TestCollector(t *testing.T) {
+	t.Skip("Skipping test as it requires WMI data")
+
	testutils.TestCollector(t, netframework.New, nil)
}
@@ -94,20 +94,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	errs := make([]error, 0, 2)
-
-	c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil)
-	if err != nil {
-		errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
-	}
-
-	c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil)
-	if err != nil {
-		errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
-	}
-
	c.accessAccepts = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "access_accepts"),
		"(AccessAccepts)",

@@ -260,13 +246,27 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

+	var err error
+
+	errs := make([]error, 0)
+
+	c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil)
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
+	}
+
+	c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil)
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
+	}
+
	return errors.Join(errs...)
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-	errs := make([]error, 0, 2)
+	errs := make([]error, 0)

	if err := c.collectAccept(ch); err != nil {
		errs = append(errs, fmt.Errorf("failed collecting NPS accept data: %w", err))
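Editor's note: this hunk moves the fallible pdh collector construction to the end of Build and accumulates both failures with errors.Join instead of aborting on the first. A self-contained sketch of the pattern, with a stand-in openCounter in place of pdh.NewCollector:

package main

import (
	"errors"
	"fmt"
)

// openCounter stands in for pdh.NewCollector; it is illustrative only.
func openCounter(object string) error { return nil }

func build() error {
	errs := make([]error, 0)

	if err := openCounter("NPS Authentication Server"); err != nil {
		errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
	}

	if err := openCounter("NPS Accounting Server"); err != nil {
		errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
	}

	// errors.Join returns nil for an empty list, so the happy path needs no special case.
	return errors.Join(errs...)
}

func main() { fmt.Println(build()) }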
@@ -20,11 +20,13 @@ import (
	"fmt"
	"log/slog"
	"strconv"
+	"strings"
	"time"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
	"github.com/prometheus-community/windows_exporter/internal/headers/netapi32"
+	"github.com/prometheus-community/windows_exporter/internal/headers/psapi"
	"github.com/prometheus-community/windows_exporter/internal/headers/sysinfoapi"
	"github.com/prometheus-community/windows_exporter/internal/mi"
	"github.com/prometheus-community/windows_exporter/internal/types"

@@ -47,12 +49,14 @@ type Collector struct {
	hostname      *prometheus.Desc
	osInformation *prometheus.Desc

-	// users
+	// Deprecated: Use windows_system_processes instead.
	processes *prometheus.Desc
+
+	// Deprecated: Use windows_system_process_limit instead.
	processesLimit *prometheus.Desc

	// users
-	// Deprecated: Use count(windows_logon_logon_type) instead.
+	// Deprecated: Use `sum(windows_terminal_services_session_info{state="active"})` instead.
	users *prometheus.Desc

	// physicalMemoryFreeBytes
@@ -105,7 +109,7 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
-	logger.Warn("The os collect holds a number of deprecated metrics and will be removed mid 2025. "+
+	logger.Warn("The os collector holds a number of deprecated metrics and will be removed mid 2025. "+
		"See https://github.com/prometheus-community/windows_exporter/pull/1596 for more information.",
		slog.String("collector", Name),
	)

@@ -117,6 +121,11 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {

	version := windows.RtlGetVersion()

+	// Microsoft has decided to keep the major version as "10" for Windows 11, including the product name.
+	if version.BuildNumber >= 22000 {
+		productName = strings.Replace(productName, " 10 ", " 11 ", 1)
+	}
+
	c.osInformation = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "info"),
		`Contains full product name & version in labels. Note that the "major_version" for Windows 11 is \"10\"; a build number greater than 22000 represents Windows 11.`,
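Editor's note: as the comment in this hunk says, Windows 11 still reports major version 10, so the only reliable discriminator in RtlGetVersion output is the build number (22000 and above is Windows 11). A self-contained sketch of that check, using golang.org/x/sys/windows as the collector does; the sample product name is illustrative:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/windows"
)

func main() {
	v := windows.RtlGetVersion()
	productName := "Windows 10 Pro" // however obtained, e.g. from the registry

	// Windows 11 keeps MajorVersion == 10; builds >= 22000 are Windows 11.
	if v.BuildNumber >= 22000 {
		productName = strings.Replace(productName, " 10 ", " 11 ", 1)
	}

	fmt.Printf("%s (build %d)\n", productName, v.BuildNumber)
}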
@@ -160,6 +169,12 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
		nil,
	)

+	c.processes = prometheus.NewDesc(
+		prometheus.BuildFQName(types.Namespace, Name, "processes"),
+		"Deprecated: Use `windows_system_processes` instead.",
+		nil,
+		nil,
+	)
	c.processesLimit = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "processes_limit"),
		"Deprecated: Use `windows_system_process_limit` instead.",

@@ -174,7 +189,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
	)
	c.users = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "users"),
-		"Deprecated: Use `count(windows_logon_logon_type)` instead.",
+		"Deprecated: Use `sum(windows_terminal_services_session_info{state=\"active\"})` instead.",
		nil,
		nil,
	)
@@ -203,10 +218,14 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-	errs := make([]error, 0, 4)
+	errs := make([]error, 0)

	c.collect(ch)

+	if err := c.collectProcessCount(ch); err != nil {
+		errs = append(errs, fmt.Errorf("failed to collect process count metrics: %w", err))
+	}
+
	if err := c.collectHostname(ch); err != nil {
		errs = append(errs, fmt.Errorf("failed to collect hostname metrics: %w", err))
	}
@@ -269,6 +288,20 @@ func (c *Collector) collectHostname(ch chan<- prometheus.Metric) error {
	return nil
}

+func (c *Collector) collectProcessCount(ch chan<- prometheus.Metric) error {
+	gpi, err := psapi.GetPerformanceInfo()
+	if err != nil {
+		return err
+	}
+
+	ch <- prometheus.MustNewConstMetric(c.processes,
+		prometheus.GaugeValue,
+		float64(gpi.ProcessCount),
+	)
+
+	return nil
+}
+
func (c *Collector) collectTime(ch chan<- prometheus.Metric) error {
	timeZoneInfo, err := kernel32.GetDynamicTimeZoneInformation()
	if err != nil {
@@ -371,5 +404,5 @@ func (c *Collector) getWindowsVersion() (string, string, error) {
		return "", "", err
	}

-	return productName, strconv.FormatUint(revision, 10), nil
+	return strings.TrimSpace(productName), strconv.FormatUint(revision, 10), nil
}
@@ -74,13 +74,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create Paging File collector: %w", err)
-	}
-
	c.pagingLimitBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "limit_bytes"),
		"Number of bytes that can be stored in the operating system paging files. 0 (zero) indicates that there are no paging files",

@@ -95,6 +88,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create Paging File collector: %w", err)
+	}
+
	return nil
}
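Editor's note: this hunk and several later ones (physical disk, printer, SMB, SMTP, system) apply the same mechanical reorder: the infallible prometheus.NewDesc construction happens first, and the fallible OS-facing constructor moves to the end of Build, presumably so every Build funnels into a single error-returning tail. A self-contained sketch of the shape, with stand-in names in place of the exporter's real types:

package main

import (
	"errors"
	"fmt"
)

type collector struct {
	desc   string
	handle any
}

// build mirrors the reordered Build: descriptor setup (cannot fail) first,
// the fallible perf-object constructor last.
func (c *collector) build() error {
	c.desc = "windows_pagefile_limit_bytes" // stand-in for prometheus.NewDesc

	h, err := openPerfObject("Paging File") // stand-in for pdh.NewCollector
	if err != nil {
		return fmt.Errorf("failed to create Paging File collector: %w", err)
	}
	c.handle = h

	return nil
}

// openPerfObject is a hypothetical fallible constructor.
func openPerfObject(name string) (any, error) {
	if name == "" {
		return nil, errors.New("empty perf object name")
	}
	return struct{}{}, nil
}

func main() {
	fmt.Println((&collector{}).build())
}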
@@ -127,13 +127,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
-	}
-
	c.requestsQueued = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
		"The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength)",

@@ -218,6 +211,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
+	}
+
	return nil
}
@@ -126,25 +126,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
-	if miSession == nil {
-		return errors.New("miSession is nil")
-	}
-
-	miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer")
-	if err != nil {
-		return fmt.Errorf("failed to create WMI query: %w", err)
-	}
-
-	c.miQueryPrinter = miQuery
-
-	miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob")
-	if err != nil {
-		return fmt.Errorf("failed to create WMI query: %w", err)
-	}
-
-	c.miQueryPrinterJobs = miQuery
-	c.miSession = miSession
-
	c.printerJobStatus = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "job_status"),
		"A counter of printer jobs by status",

@@ -164,6 +145,25 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
		nil,
	)

+	if miSession == nil {
+		return errors.New("miSession is nil")
+	}
+
+	miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer")
+	if err != nil {
+		return fmt.Errorf("failed to create WMI query: %w", err)
+	}
+
+	c.miQueryPrinter = miQuery
+
+	miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob")
+	if err != nil {
+		return fmt.Errorf("failed to create WMI query: %w", err)
+	}
+
+	c.miQueryPrinterJobs = miQuery
+	c.miSession = miSession
+
	return nil
}
@@ -76,6 +76,8 @@ type Collector struct {
	poolBytes    *prometheus.Desc
	priorityBase *prometheus.Desc
	privateBytes *prometheus.Desc
+	// Deprecated: Use start_time_seconds_timestamp instead
+	startTimeOld *prometheus.Desc
	startTime    *prometheus.Desc
	threadCount  *prometheus.Desc
	virtualBytes *prometheus.Desc

@@ -214,8 +216,15 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
		nil,
	)

-	c.startTime = prometheus.NewDesc(
+	c.startTimeOld = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "start_time"),
+		"DEPRECATED: Use start_time_seconds_timestamp instead",
+		[]string{"process", "process_id"},
+		nil,
+	)
+
+	c.startTime = prometheus.NewDesc(
+		prometheus.BuildFQName(types.Namespace, Name, "start_time_seconds_timestamp"),
		"Time of process start.",
		[]string{"process", "process_id"},
		nil,
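Editor's note: the rename follows the usual Prometheus choreography — keep the old start_time series alive under a descriptor marked DEPRECATED while the replacement start_time_seconds_timestamp ships, and emit the same value through both during the transition. A self-contained sketch of that pattern, assuming the exporter's windows namespace and the labels shown in the hunk above:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	startTimeOld = prometheus.NewDesc(
		prometheus.BuildFQName("windows", "process", "start_time"),
		"DEPRECATED: Use start_time_seconds_timestamp instead",
		[]string{"process", "process_id"}, nil,
	)
	startTime = prometheus.NewDesc(
		prometheus.BuildFQName("windows", "process", "start_time_seconds_timestamp"),
		"Time of process start.",
		[]string{"process", "process_id"}, nil,
	)
)

// emitStartTime publishes the same timestamp under the deprecated and the
// replacement name until the old series is removed.
func emitStartTime(ch chan<- prometheus.Metric, ts float64, name, pid string) {
	ch <- prometheus.MustNewConstMetric(startTimeOld, prometheus.GaugeValue, ts, name, pid)
	ch <- prometheus.MustNewConstMetric(startTime, prometheus.GaugeValue, ts, name, pid)
}

func main() {
	ch := make(chan prometheus.Metric, 2)
	emitStartTime(ch, float64(time.Now().Unix()), "example.exe", "4242")
}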
@@ -141,6 +141,13 @@ func (c *Collector) collectWorkerV1() {
		name, pidString,
	)

+	ch <- prometheus.MustNewConstMetric(
+		c.startTimeOld,
+		prometheus.GaugeValue,
+		data.ElapsedTime,
+		name, pidString,
+	)
+
	ch <- prometheus.MustNewConstMetric(
		c.handleCount,
		prometheus.GaugeValue,
@@ -23,6 +23,7 @@ import (
	"strconv"
	"strings"
	"sync"
+	"time"

	"github.com/prometheus-community/windows_exporter/internal/pdh"
	"github.com/prometheus/client_golang/prometheus"

@@ -134,10 +135,19 @@ func (c *Collector) collectWorkerV2() {
		name, pidString, parentPID, strconv.Itoa(int(processGroupID)), processOwner, cmdLine,
	)

+	startTime := float64(time.Now().Unix() - int64(data.ElapsedTime))
+
+	ch <- prometheus.MustNewConstMetric(
+		c.startTimeOld,
+		prometheus.GaugeValue,
+		startTime,
+		name, pidString,
+	)
+
	ch <- prometheus.MustNewConstMetric(
		c.startTime,
		prometheus.GaugeValue,
-		data.ElapsedTime,
+		startTime,
		name, pidString,
	)
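Editor's note: collectWorkerV2 now derives an absolute start timestamp instead of exporting the raw elapsed-time counter. The arithmetic in isolation (assuming, as the subtraction in the hunk implies, that ElapsedTime is in seconds):

package main

import (
	"fmt"
	"time"
)

// startTimestamp converts an "elapsed seconds since process start" counter
// sample into a Unix timestamp of the process start.
func startTimestamp(elapsedSeconds float64) float64 {
	return float64(time.Now().Unix() - int64(elapsedSeconds))
}

func main() {
	fmt.Println(startTimestamp(90)) // a worker that started 90 seconds ago
}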
@@ -102,18 +102,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(*slog.Logger, *mi.Session) error {
-	var err error
-
-	c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create RemoteFX Network collector: %w", err)
-	}
-
-	c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err)
-	}
-
	// net
	c.baseTCPRTT = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"),

@@ -238,13 +226,27 @@ func (c *Collector) Build(*slog.Logger, *mi.Session) error {
		nil,
	)

-	return nil
+	var err error
+
+	errs := make([]error, 0)
+
+	c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll)
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to create RemoteFX Network collector: %w", err))
+	}
+
+	c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll)
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err))
+	}
+
+	return errors.Join(errs...)
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-	errs := make([]error, 0, 2)
+	errs := make([]error, 0)

	if err := c.collectRemoteFXNetworkCount(ch); err != nil {
		errs = append(errs, fmt.Errorf("failed collecting RemoteFX Network metrics: %w", err))
@@ -248,7 +248,7 @@ func getScheduledTasks() (ScheduledTasks, error) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

-	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
+	if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
		var oleCode *ole.OleError
		if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != S_FALSE {
			return nil, err
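Editor's note: the scheduled-task collector switches COM initialization from a multithreaded apartment to a single-threaded one and disables OLE 1.0 DDE; S_FALSE ("already initialized on this thread") is tolerated, matching the errors.As check in the hunk. A reduced sketch with go-ole, where the local S_FALSE constant mirrors the one the collector references:

package main

import (
	"errors"
	"runtime"

	ole "github.com/go-ole/go-ole"
)

const S_FALSE = 0x00000001 // CoInitializeEx: COM already initialized on this thread

func initCOM() error {
	// COM apartment membership is per-thread, so pin the goroutine first.
	runtime.LockOSThread()

	err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE)
	if err != nil {
		var oleErr *ole.OleError
		if errors.As(err, &oleErr) && oleErr.Code() != ole.S_OK && oleErr.Code() != S_FALSE {
			return err
		}
	}

	// The caller is expected to CoUninitialize and unlock the thread when done.
	return nil
}

func main() { _ = initCOM() }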
@@ -64,7 +64,8 @@ type Collector struct {
	// ref: https://victoriametrics.com/blog/go-sync-pool/
	serviceConfigPoolBytes sync.Pool

-	serviceManagerHandle *mgr.Mgr
+	serviceManagerHandle   *mgr.Mgr
+	queryAllServicesBuffer []byte
}

func New(config *Config) *Collector {

@@ -140,6 +141,8 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
		},
	}

+	c.queryAllServicesBuffer = make([]byte, 1024*200)
+
	c.info = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "info"),
		"A metric with a constant '1' value labeled with service information",

@@ -209,7 +212,7 @@ func (c *Collector) Close() error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	services, err := c.queryAllServices()
	if err != nil {
-		return fmt.Errorf("failed to query services: %w", err)
+		return fmt.Errorf("failed to query all services: %w", err)
	}

	servicesCh := make(chan windows.ENUM_SERVICE_STATUS_PROCESS, len(services))
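Editor's note: Build now preallocates a 200 KiB enumeration buffer that every scrape reuses; Go 1.21's built-in clear zeroes a slice in place without changing its length or capacity, so stale bytes from the previous scrape cannot leak into the next unsafe.Slice cast. In miniature:

package main

import "fmt"

func main() {
	buf := make([]byte, 1024*200) // allocated once, reused per scrape

	copy(buf, []byte("previous scrape data"))
	clear(buf) // zeroes every element; len and cap are unchanged

	fmt.Println(len(buf), cap(buf), buf[0]) // 204800 204800 0
}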
@@ -237,6 +240,15 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectWorker(ch chan<- prometheus.Metric, service windows.ENUM_SERVICE_STATUS_PROCESS) {
+	if uintptr(unsafe.Pointer(service.ServiceName)) == uintptr(windows.InvalidHandle) {
+		c.logger.Log(context.Background(), slog.LevelWarn, "failed collecting service info",
+			slog.String("err", "ServiceName is 0xffffffffffffffff"),
+			slog.String("service", fmt.Sprintf("%+v", service)),
+		)
+
+		return
+	}
+
	serviceName := windows.UTF16PtrToString(service.ServiceName)

	if c.config.ServiceExclude.MatchString(serviceName) || !c.config.ServiceInclude.MatchString(serviceName) {

@@ -347,7 +359,9 @@ func (c *Collector) collectService(ch chan<- prometheus.Metric, serviceName stri

	logLevel := slog.LevelWarn

-	if errors.Is(err, windows.ERROR_ACCESS_DENIED) {
+	// ERROR_INVALID_PARAMETER is returned when the process is no longer running;
+	// this can happen if the service terminated after the service API was queried.
+	if errors.Is(err, windows.ERROR_ACCESS_DENIED) || errors.Is(err, windows.ERROR_INVALID_PARAMETER) {
		logLevel = slog.LevelDebug
	}
@@ -363,22 +377,24 @@ func (c *Collector) collectService(ch chan<- prometheus.Metric, serviceName stri
// This is realized by asking the Service Manager directly.
func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) {
	var (
-		bytesNeeded      uint32
-		servicesReturned uint32
-		err              error
+		additionalBytesNeeded uint32
+		servicesReturned      uint32
+		err                   error
	)

-	buf := make([]byte, 1024*100)
+	clear(c.queryAllServicesBuffer)

	for {
+		currentBufferSize := uint32(cap(c.queryAllServicesBuffer))
+
		err = windows.EnumServicesStatusEx(
			c.serviceManagerHandle.Handle,
			windows.SC_STATUS_PROCESS_INFO,
			windows.SERVICE_WIN32,
			windows.SERVICE_STATE_ALL,
-			&buf[0],
-			uint32(len(buf)),
-			&bytesNeeded,
+			&c.queryAllServicesBuffer[0],
+			currentBufferSize,
+			&additionalBytesNeeded,
			&servicesReturned,
			nil,
			nil,
@@ -392,18 +408,21 @@ func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, e
			return nil, err
		}

-		if bytesNeeded <= uint32(len(buf)) {
-			return nil, err
-		}
+		/*
+			Unlike other WIN32 API calls, additionalBytesNeeded does not report the absolute
+			number of bytes needed, but the additional bytes needed relative to the cbBufSize parameter.
+			ref: https://stackoverflow.com/questions/14756347/when-calling-enumservicesstatusex-twice-i-still-get-eror-more-data-in-c
+		*/

-		buf = make([]byte, bytesNeeded)
+		c.queryAllServicesBuffer = make([]byte, currentBufferSize+additionalBytesNeeded)
	}

	if servicesReturned == 0 {
		return []windows.ENUM_SERVICE_STATUS_PROCESS{}, nil
	}

-	services := unsafe.Slice((*windows.ENUM_SERVICE_STATUS_PROCESS)(unsafe.Pointer(&buf[0])), int(servicesReturned))
+	services := unsafe.Slice((*windows.ENUM_SERVICE_STATUS_PROCESS)(unsafe.Pointer(&c.queryAllServicesBuffer[0])), int(servicesReturned))

	return services, nil
}
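Editor's note: the reworked queryAllServices grows its buffer by the reported shortfall because, as the hunk's comment explains, EnumServicesStatusEx returns how many additional bytes it needs beyond the buffer it was given, not an absolute size. A stripped-down sketch of the grow-and-retry loop, with error handling condensed and the standard ERROR_MORE_DATA contract assumed:

package main

import (
	"errors"

	"golang.org/x/sys/windows"
)

// enumServices grows buf until EnumServicesStatusEx succeeds. The reported
// additionalBytesNeeded is relative to the buffer size that was passed in.
func enumServices(scm windows.Handle, buf []byte) ([]byte, uint32, error) {
	var additionalBytesNeeded, servicesReturned uint32

	for {
		currentBufferSize := uint32(cap(buf))

		err := windows.EnumServicesStatusEx(
			scm,
			windows.SC_STATUS_PROCESS_INFO,
			windows.SERVICE_WIN32,
			windows.SERVICE_STATE_ALL,
			&buf[0],
			currentBufferSize,
			&additionalBytesNeeded,
			&servicesReturned,
			nil,
			nil,
		)
		if err == nil {
			return buf, servicesReturned, nil
		}

		if !errors.Is(err, windows.ERROR_MORE_DATA) {
			return nil, 0, err
		}

		// Relative, not absolute: grow by the reported shortfall.
		buf = make([]byte, currentBufferSize+additionalBytesNeeded)
	}
}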
@@ -414,15 +433,6 @@ func (c *Collector) getProcessStartTime(pid uint32) (uint64, error) {
		return 0, fmt.Errorf("failed to open process %w", err)
	}

-	defer func(handle windows.Handle) {
-		err := windows.CloseHandle(handle)
-		if err != nil {
-			c.logger.Warn("failed to close process handle",
-				slog.Any("err", err),
-			)
-		}
-	}(handle)
-
	var (
		creation windows.Filetime
		exit     windows.Filetime

@@ -431,6 +441,14 @@ func (c *Collector) getProcessStartTime(pid uint32) (uint64, error) {
	)

	err = windows.GetProcessTimes(handle, &creation, &exit, &krn, &user)

+	if err := windows.CloseHandle(handle); err != nil {
+		c.logger.LogAttrs(context.Background(), slog.LevelWarn, "failed to close process handle",
+			slog.Any("err", err),
+			slog.Uint64("pid", uint64(pid)),
+		)
+	}
+
	if err != nil {
		return 0, fmt.Errorf("failed to get process times %w", err)
	}
@@ -471,7 +489,7 @@ func (c *Collector) getServiceConfig(service *mgr.Service) (mgr.Config, error) {
		*buf = make([]byte, bytesNeeded)
	}

-	c.serviceConfigPoolBytes.Put(buf)
+	defer c.serviceConfigPoolBytes.Put(buf)

	return mgr.Config{
		BinaryPathName: windows.UTF16PtrToString(serviceConfig.BinaryPathName),
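Editor's note: the one-word fix above matters because the buffer backs the UTF-16 strings that mgr.Config is still decoding; returning it to the pool before the return statement runs would let another goroutine reuse memory that windows.UTF16PtrToString is still reading. The safe shape of the pattern in miniature, with generic names:

package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() any { return new([]byte) },
}

// withPooledBuffer returns the buffer to the pool only after use has
// finished reading it; an early Put (as in the pre-fix code) races.
func withPooledBuffer(use func([]byte)) {
	buf := bufPool.Get().(*[]byte)
	if cap(*buf) == 0 {
		*buf = make([]byte, 4096)
	}

	defer bufPool.Put(buf)

	use(*buf)
}

func main() {
	withPooledBuffer(func(b []byte) { fmt.Println(len(b)) })
}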
@@ -76,13 +76,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)
-	}
-
	c.currentOpenFileCount = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "server_shares_current_open_file_count"),
		"Current total count open files on the SMB Server Share",

@@ -132,6 +125,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)
+	}
+
	return nil
}
@@ -91,13 +91,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create SMB Client Shares collector: %w", err)
-	}
-
	// desc creates a new prometheus description
	desc := func(metricName string, description string, labels []string) *prometheus.Desc {
		return prometheus.NewDesc(

@@ -193,6 +186,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		[]string{"server", "share"},
	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create SMB Client Shares collector: %w", err)
+	}
+
	return nil
}
@@ -157,13 +157,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create SMTP Server collector: %w", err)
-	}
-
	logger.Info("smtp collector is in an experimental state! Metrics for this collector have not been tested.",
		slog.String("collector", Name),
	)

@@ -421,6 +414,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
		nil,
	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create SMTP Server collector: %w", err)
+	}
+
	return nil
}
@@ -18,8 +18,10 @@ package system
import (
	"fmt"
	"log/slog"
+	"time"

	"github.com/alecthomas/kingpin/v2"
+	"github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
	"github.com/prometheus-community/windows_exporter/internal/mi"
	"github.com/prometheus-community/windows_exporter/internal/pdh"
	"github.com/prometheus-community/windows_exporter/internal/types"

@@ -37,6 +39,8 @@ var ConfigDefaults = Config{}
type Collector struct {
	config Config

+	bootTimeTimestamp float64
+
	perfDataCollector *pdh.Collector
	perfDataObject    []perfDataCounterValues

@@ -46,8 +50,10 @@ type Collector struct {
	processes        *prometheus.Desc
	processesLimit   *prometheus.Desc
	systemCallsTotal *prometheus.Desc
-	bootTime         *prometheus.Desc
-	threads          *prometheus.Desc
+	// Deprecated: Use windows_system_boot_time_timestamp instead
+	bootTimeSeconds *prometheus.Desc
+	bootTime        *prometheus.Desc
+	threads         *prometheus.Desc
}

func New(config *Config) *Collector {
@@ -77,19 +83,18 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil)
-	if err != nil {
-		return fmt.Errorf("failed to create System collector: %w", err)
-	}
-
	c.bootTime = prometheus.NewDesc(
-		prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"),
+		prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp"),
		"Unix timestamp of system boot time",
		nil,
		nil,
	)
+	c.bootTimeSeconds = prometheus.NewDesc(
+		prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"),
+		"Deprecated: Use windows_system_boot_time_timestamp instead",
+		nil,
+		nil,
+	)
	c.contextSwitchesTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "context_switches_total"),
		"Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)",

@@ -134,6 +139,15 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

+	c.bootTimeTimestamp = float64(time.Now().Unix() - int64(kernel32.GetTickCount64()/1000))
+
+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil)
+	if err != nil {
+		return fmt.Errorf("failed to create System collector: %w", err)
+	}
+
	return nil
}
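Editor's note: Build now snapshots the boot time once. GetTickCount64 returns milliseconds since boot, so subtracting it (in seconds) from the current Unix time yields a timestamp that stays constant across scrapes instead of being re-derived from the sampled "System Up Time" counter. Standalone, assuming golang.org/x/sys/windows in place of the repo's internal kernel32 wrapper:

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/windows"
)

func main() {
	// GetTickCount64 counts milliseconds since boot and is unaffected by
	// wall-clock adjustments made after startup; now minus uptime is boot time.
	uptimeSeconds := int64(windows.GetTickCount64() / 1000)
	bootTime := time.Unix(time.Now().Unix()-uptimeSeconds, 0)

	fmt.Println("booted at:", bootTime.UTC())
}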
@@ -143,6 +157,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	err := c.perfDataCollector.Collect(&c.perfDataObject)
	if err != nil {
		return fmt.Errorf("failed to collect System metrics: %w", err)
+	} else if len(c.perfDataObject) == 0 {
+		return fmt.Errorf("failed to collect System metrics: %w", types.ErrNoDataUnexpected)
	}

	ch <- prometheus.MustNewConstMetric(

@@ -170,17 +186,24 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
		prometheus.CounterValue,
		c.perfDataObject[0].SystemCallsPerSec,
	)
-	ch <- prometheus.MustNewConstMetric(
-		c.bootTime,
-		prometheus.GaugeValue,
-		c.perfDataObject[0].SystemUpTime,
-	)
	ch <- prometheus.MustNewConstMetric(
		c.threads,
		prometheus.GaugeValue,
		c.perfDataObject[0].Threads,
	)

+	ch <- prometheus.MustNewConstMetric(
+		c.bootTimeSeconds,
+		prometheus.GaugeValue,
+		c.bootTimeTimestamp,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.bootTime,
+		prometheus.GaugeValue,
+		c.bootTimeTimestamp,
+	)
+
	// Windows has no defined limit, and is based off available resources. This currently isn't calculated by WMI and is set to default value.
	// https://techcommunity.microsoft.com/t5/windows-blog-archive/pushing-the-limits-of-windows-processes-and-threads/ba-p/723824
	// https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-operatingsystem
Some files were not shown because too many files have changed in this diff.