Mirror of https://github.com/prometheus-community/windows_exporter.git, synced 2026-02-08 05:56:37 +00:00
Compare commits
15 Commits
| SHA1 |
|---|
| db60c78f32 |
| bdd7725f17 |
| 9ed3769765 |
| aa7157e27c |
| 13d5e1cd12 |
| 2c4698f119 |
| 759faee1c3 |
| 50808c73fe |
| fe17f5f597 |
| b62c724977 |
| 7252d403ae |
| 3180315cff |
| 9da6e56fcf |
| c300935170 |
| 6f0209ddb7 |
@@ -13,4 +13,4 @@ indent_size = 4

[*.{yml,yaml}]
indent_style = space
indent_size = 2
indent_size = 2
.github/workflows/lint.yml (6 lines changed, vendored)
@@ -8,11 +8,9 @@ on:
      - master
      - next
      - main
      - "0.*"
      - "1.*"
  pull_request:
    branches:
      - master
      - next
      - main

env:
  VERSION_PROMU: '0.14.0'
.github/workflows/pr-check.yaml (2 lines changed, vendored)
@@ -37,7 +37,7 @@ jobs:
      - name: check
        run: |
          PR_TITLE_PREFIX=$(echo "$PR_TITLE" | cut -d':' -f1)
          if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then
          if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "fix(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Release"* ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then
            exit 0
          fi
README.md

@@ -33,7 +33,6 @@ Name | Description | Enabled by default
[iis](docs/collector.iis.md) | IIS sites and applications |
[license](docs/collector.license.md) | Windows license status |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓
[logon](docs/collector.logon.md) | User logon sessions |
[memory](docs/collector.memory.md) | Memory usage metrics | ✓
[mscluster](docs/collector.mscluster.md) | MSCluster metrics |
[msmq](docs/collector.msmq.md) | MSMQ queues |
cmd/windows_exporter/main.go

@@ -47,7 +47,11 @@ import (
)

func main() {
    exitCode := run()
    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)

    exitCode := run(ctx, os.Args[1:])

    stop()

    // If we are running as a service, we need to signal the service control manager that we are done.
    if !IsService {
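The new main wires OS signal handling into the process context before calling run. A minimal, self-contained sketch of the same signal.NotifyContext pattern; the timeout and messages here are illustrative, not the exporter's:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"
)

func main() {
	// Derive a context that is cancelled on Ctrl+C (os.Interrupt) or kill.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
	defer stop() // restore default signal behavior when main returns

	select {
	case <-ctx.Done():
		fmt.Println("shutting down on signal")
	case <-time.After(10 * time.Second):
		fmt.Println("finished without interruption")
	}
}
```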
@@ -60,9 +64,8 @@ func main() {
    <-serviceManagerFinishedCh
}

func run() int {
func run(ctx context.Context, args []string) int {
    startTime := time.Now()
    ctx := context.Background()

    app := kingpin.New("windows_exporter", "A metrics collector for Windows.")
@@ -71,7 +74,7 @@ func run() int {
        "config.file",
        "YAML configuration file to use. Values set in this file will be overridden by CLI flags.",
    ).String()
    insecureSkipVerify = app.Flag(
    _ = app.Flag(
        "config.file.insecure-skip-verify",
        "Skip TLS verification in loading YAML configuration.",
    ).Default("false").Bool()
@@ -122,11 +125,10 @@ func run() int {
    // Initialize collectors before loading and parsing CLI arguments
    collectors := collector.NewWithFlags(app)

    // Load values from configuration file(s). Executable flags must first be parsed, in order
    // to load the specified file(s).
    if _, err := app.Parse(os.Args[1:]); err != nil {
    //nolint:contextcheck
    if err := config.Parse(app, args); err != nil {
        //nolint:sloglint // we do not have a logger yet
        slog.Error("Failed to parse CLI args",
        slog.LogAttrs(ctx, slog.LevelError, "Failed to load configuration",
            slog.Any("err", err),
        )
@@ -137,62 +139,21 @@ func run() int {

    logger, err := log.New(logConfig)
    if err != nil {
        //nolint:sloglint // we do not have a logger yet
        slog.Error("failed to create logger",
        logger.LogAttrs(ctx, slog.LevelError, "failed to create logger",
            slog.Any("err", err),
        )

        return 1
    }

    if *configFile != "" {
        resolver, err := config.NewResolver(ctx, *configFile, logger, *insecureSkipVerify)
        if err != nil {
            logger.Error("could not load config file",
                slog.Any("err", err),
            )

            return 1
        }

        if err = resolver.Bind(app, os.Args[1:]); err != nil {
            logger.ErrorContext(ctx, "failed to bind configuration",
                slog.Any("err", err),
            )

            return 1
        }

        // Parse flags once more to include those discovered in configuration file(s).
        if _, err = app.Parse(os.Args[1:]); err != nil {
            logger.ErrorContext(ctx, "failed to parse CLI args from YAML file",
                slog.Any("err", err),
            )

            return 1
        }

        // NOTE: This is a temporary fix for issue #1092: calling kingpin.Parse
        // twice makes slice flags duplicate their values, so this cleans up
        // the first parse before the second call.
        slices.Sort(*webConfig.WebListenAddresses)
        *webConfig.WebListenAddresses = slices.Clip(slices.Compact(*webConfig.WebListenAddresses))

        logger, err = log.New(logConfig)
        if err != nil {
            //nolint:sloglint // we do not have a logger yet
            slog.Error("failed to create logger",
                slog.Any("err", err),
            )

            return 1
        }
    if configFile != nil && *configFile != "" {
        logger.InfoContext(ctx, "using configuration file: "+*configFile)
    }

    logger.LogAttrs(ctx, slog.LevelDebug, "logging has Started")

    if err = setPriorityWindows(logger, os.Getpid(), *processPriority); err != nil {
        logger.Error("failed to set process priority",
    if err = setPriorityWindows(ctx, logger, os.Getpid(), *processPriority); err != nil {
        logger.LogAttrs(ctx, slog.LevelError, "failed to set process priority",
            slog.Any("err", err),
        )
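The removed issue #1092 note above worked around kingpin parsing the CLI twice, which duplicated cumulative slice flags. The dedup idiom it used comes straight from the standard slices package; a standalone sketch:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Parsing flags twice duplicated cumulative values, e.g. listen addresses.
	addrs := []string{":9182", "[::1]:8081", ":9182", "[::1]:8081"}

	// Sort so duplicates are adjacent, drop them, and release spare capacity.
	slices.Sort(addrs)
	addrs = slices.Clip(slices.Compact(addrs))

	fmt.Println(addrs) // [:9182 [::1]:8081]
}
```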
@@ -201,7 +162,7 @@ func run() int {

    enabledCollectorList := expandEnabledCollectors(*enabledCollectors)
    if err := collectors.Enable(enabledCollectorList); err != nil {
        logger.Error("couldn't enable collectors",
        logger.LogAttrs(ctx, slog.LevelError, "couldn't enable collectors",
            slog.Any("err", err),
        )
@@ -209,9 +170,9 @@ func run() int {
    }

    // Initialize collectors before loading
    if err = collectors.Build(logger); err != nil {
    if err = collectors.Build(ctx, logger); err != nil {
        for _, err := range utils.SplitError(err) {
            logger.Error("couldn't initialize collector",
            logger.LogAttrs(ctx, slog.LevelError, "couldn't initialize collector",
                slog.Any("err", err),
            )
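utils.SplitError above unpacks a combined error into its parts so each collector failure is logged separately. The standard library supports the same idea natively; a sketch (SplitError is internal to the exporter, so the unwrap below is an assumption about how such a helper can work):

```go
package main

import (
	"errors"
	"fmt"
)

// splitError mirrors what a helper like utils.SplitError plausibly does:
// it unwraps an error produced by errors.Join into individual errors.
func splitError(err error) []error {
	if u, ok := err.(interface{ Unwrap() []error }); ok {
		return u.Unwrap()
	}

	return []error{err}
}

func main() {
	err := errors.Join(
		errors.New("couldn't initialize collector: cpu"),
		errors.New("couldn't initialize collector: memory"),
	)

	for _, e := range splitError(err) {
		fmt.Println(e)
	}
}
```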
@@ -266,17 +227,14 @@ func run() int {
        close(errCh)
    }()

    ctx, stop := signal.NotifyContext(ctx, os.Interrupt, os.Kill)
    defer stop()

    select {
    case <-ctx.Done():
        logger.Info("Shutting down windows_exporter via kill signal")
        logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via kill signal")
    case <-stopCh:
        logger.Info("Shutting down windows_exporter via service control")
        logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via service control")
    case err := <-errCh:
        if err != nil {
            logger.ErrorContext(ctx, "Failed to start windows_exporter",
            logger.LogAttrs(ctx, slog.LevelError, "Failed to start windows_exporter",
                slog.Any("err", err),
            )
@@ -287,9 +245,9 @@ func run() int {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    _ = server.Shutdown(ctx)
    _ = server.Shutdown(ctx) //nolint:contextcheck // create a new context for server shutdown

    logger.InfoContext(ctx, "windows_exporter has shut down")
    logger.LogAttrs(ctx, slog.LevelInfo, "windows_exporter has shut down") //nolint:contextcheck

    return 0
}
@@ -312,7 +270,7 @@ func logCurrentUser(logger *slog.Logger) {
}

// setPriorityWindows sets the priority of the current process to the specified value.
func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
func setPriorityWindows(ctx context.Context, logger *slog.Logger, pid int, priority string) error {
    // Mapping of priority names to uint32 values required by windows.SetPriorityClass.
    priorityStringToInt := map[string]uint32{
        "realtime": windows.REALTIME_PRIORITY_CLASS,

@@ -330,7 +288,7 @@ func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
        return nil
    }

    logger.LogAttrs(context.Background(), slog.LevelDebug, "setting process priority to "+priority)
    logger.LogAttrs(ctx, slog.LevelDebug, "setting process priority to "+priority)

    // https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
    handle, err := windows.OpenProcess(
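For context, a minimal standalone sketch of adjusting a process's priority class with golang.org/x/sys/windows, the same API the function above wraps; the choice of class and the error handling are illustrative:

```go
//go:build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/windows"
)

func main() {
	// Open our own process with just enough rights to change its priority.
	handle, err := windows.OpenProcess(
		windows.PROCESS_QUERY_INFORMATION|windows.PROCESS_SET_INFORMATION,
		false,
		uint32(os.Getpid()),
	)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(handle)

	// Lower our priority; BELOW_NORMAL_PRIORITY_CLASS is one of the
	// uint32 classes accepted by SetPriorityClass.
	if err := windows.SetPriorityClass(handle, windows.BELOW_NORMAL_PRIORITY_CLASS); err != nil {
		panic(err)
	}

	fmt.Println("priority lowered")
}
```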
cmd/windows_exporter/main_test.go (new file, 188 lines)
@@ -0,0 +1,188 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build windows

package main

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net"
    "net/http"
    "net/url"
    "os"
    "strings"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    "golang.org/x/sys/windows"
)

//nolint:tparallel
func TestRun(t *testing.T) {
    t.Parallel()

    for _, tc := range []struct {
        name            string
        args            []string
        config          string
        metricsEndpoint string
        exitCode        int
    }{
        {
            name:            "default",
            args:            []string{},
            metricsEndpoint: "http://127.0.0.1:9182/metrics",
        },
        {
            name:            "web.listen-address",
            args:            []string{"--web.listen-address=127.0.0.1:8080"},
            metricsEndpoint: "http://127.0.0.1:8080/metrics",
        },
        {
            name:            "web.listen-address",
            args:            []string{"--web.listen-address=127.0.0.1:8081", "--web.listen-address=[::1]:8081"},
            metricsEndpoint: "http://[::1]:8081/metrics",
        },
        {
            name:            "config",
            args:            []string{"--config.file=config.yaml"},
            config:          `{"web":{"listen-address":"127.0.0.1:8082"}}`,
            metricsEndpoint: "http://127.0.0.1:8082/metrics",
        },
        {
            name:            "web.listen-address with config",
            args:            []string{"--config.file=config.yaml", "--web.listen-address=127.0.0.1:8084"},
            config:          `{"web":{"listen-address":"127.0.0.1:8083"}}`,
            metricsEndpoint: "http://127.0.0.1:8084/metrics",
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()

            if tc.config != "" {
                // Create a temporary config file.
                tmpfile, err := os.CreateTemp(t.TempDir(), "config-*.yaml")
                require.NoError(t, err)

                t.Cleanup(func() {
                    require.NoError(t, tmpfile.Close())
                })

                _, err = tmpfile.WriteString(tc.config)
                require.NoError(t, err)

                for i, arg := range tc.args {
                    tc.args[i] = strings.ReplaceAll(arg, "config.yaml", tmpfile.Name())
                }
            }

            exitCodeCh := make(chan int)

            var stdout string

            go func() {
                stdout = captureOutput(t, func() {
                    // Simulate the service control manager signaling that we are done.
                    exitCodeCh <- run(ctx, tc.args)
                })
            }()

            t.Cleanup(func() {
                select {
                case exitCode := <-exitCodeCh:
                    require.Equal(t, tc.exitCode, exitCode)
                case <-time.After(2 * time.Second):
                    t.Fatalf("timed out waiting for exit code, want %d", tc.exitCode)
                }
            })

            if tc.exitCode != 0 {
                return
            }

            uri, err := url.Parse(tc.metricsEndpoint)
            require.NoError(t, err)

            err = waitUntilListening(t, "tcp", uri.Host)
            require.NoError(t, err, "LOGS:\n%s", stdout)

            req, err := http.NewRequestWithContext(ctx, http.MethodGet, tc.metricsEndpoint, nil)
            require.NoError(t, err)

            resp, err := http.DefaultClient.Do(req)
            require.NoError(t, err, "LOGS:\n%s", stdout)
            require.Equal(t, http.StatusOK, resp.StatusCode)

            body, err := io.ReadAll(resp.Body)
            require.NoError(t, err)

            err = resp.Body.Close()
            require.NoError(t, err)

            require.NotEmpty(t, body)
            require.Contains(t, string(body), "# HELP windows_exporter_build_info")

            cancel()
        })
    }
}

func captureOutput(tb testing.TB, f func()) string {
    tb.Helper()

    orig := os.Stdout
    r, w, _ := os.Pipe()
    os.Stdout = w

    f()

    os.Stdout = orig

    _ = w.Close()

    out, _ := io.ReadAll(r)

    return string(out)
}

func waitUntilListening(tb testing.TB, network, address string) error {
    tb.Helper()

    var (
        conn net.Conn
        err  error
    )

    for range 10 {
        conn, err = net.DialTimeout(network, address, 100*time.Millisecond)
        if err == nil {
            _ = conn.Close()

            return nil
        }

        // Errno 10061 is WSAECONNREFUSED: the listener is not accepting connections yet.
        if errors.Is(err, windows.Errno(10061)) {
            time.Sleep(50 * time.Millisecond)

            continue
        }
    }

    return fmt.Errorf("listener not listening: %w", err)
}
config.yaml (25 lines changed)
@@ -1,23 +1,2 @@
# example configuration file for windows_exporter

collectors:
  enabled: cpu,cpu_info,exchange,iis,logical_disk,logon,memory,net,os,performancecounter,process,remote_fx,service,system,tcp,time,terminal_services,textfile
collector:
  service:
    include: "windows_exporter"
  performancecounter:
    objects: |-
      - name: photon_udp
        object: "Photon Socket Server: UDP"
        instances: ["*"]
        counters:
          - name: "UDP: Datagrams in"
            metric: "photon_udp_datagrams"
            labels:
              direction: "in"
          - name: "UDP: Datagrams out"
            metric: "photon_udp_datagrams"
            labels:
              direction: "out"
log:
  level: warn
web:
  listen-address: ":9183"
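The exporter's own config loader is not shown in this diff, but a file shaped like the example above can be read with any YAML decoder. A minimal sketch using gopkg.in/yaml.v3; the struct names are illustrative, not the exporter's actual types:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// exporterConfig models only the keys used in the example file;
// it is an illustration, not the exporter's real configuration type.
type exporterConfig struct {
	Collectors struct {
		Enabled string `yaml:"enabled"`
	} `yaml:"collectors"`
	Log struct {
		Level string `yaml:"level"`
	} `yaml:"log"`
	Web struct {
		ListenAddress string `yaml:"listen-address"`
	} `yaml:"web"`
}

func main() {
	raw, err := os.ReadFile("config.yaml")
	if err != nil {
		panic(err)
	}

	var cfg exporterConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	fmt.Println(cfg.Web.ListenAddress, cfg.Log.Level)
}
```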
docs/collector.logon.md (deleted)

@@ -1,78 +0,0 @@
# logon collector

The logon collector exposes metrics detailing the active user logon sessions.

|                     |           |
|---------------------|-----------|
| Metric name prefix  | `logon`   |
| Source              | Win32 API |
| Enabled by default? | No        |

## Flags

None

## Metrics

| Name | Description | Type | Labels |
|------|-------------|------|--------|
| `windows_logon_session_logon_timestamp_seconds` | timestamp of the logon session in seconds. | gauge | `domain`, `id`, `type`, `username` |

### Example metric

```
# HELP windows_logon_session_logon_timestamp_seconds timestamp of the logon session in seconds.
# TYPE windows_logon_session_logon_timestamp_seconds gauge
windows_logon_session_logon_timestamp_seconds{domain="",id="0x0:0x8c54",type="System",username=""} 1.72876928e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x991a",type="Interactive",username="UMFD-1"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x9933",type="Interactive",username="UMFD-0"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x994a",type="Interactive",username="UMFD-0"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x999d",type="Interactive",username="UMFD-1"} 1.728769282e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf25a",type="Interactive",username="UMFD-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf290",type="Interactive",username="UMFD-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x130241",type="Network",username="vm-jok-dev$"} 1.728769625e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x24f7c9",type="Network",username="vm-jok-dev$"} 1.728770121e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x276846",type="Network",username="vm-jok-dev$"} 1.728770195e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e4",type="Service",username="vm-jok-dev$"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e7",type="System",username="vm-jok-dev$"} 1.728769279e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x71d0f",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x720a3",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x725cb",type="Network",username="vm-jok-dev$"} 1.728769324e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x753d8",type="Network",username="vm-jok-dev$"} 1.728769325e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xa3913",type="Network",username="vm-jok-dev$"} 1.728769385e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xbe7f2",type="Network",username="jok"} 1.728769531e+09
windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xc76c4",type="RemoteInteractive",username="jok"} 1.728769533e+09
windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e3",type="Service",username="IUSR"} 1.728769295e+09
windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e5",type="Service",username="LOCAL SERVICE"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xae4c7",type="Service",username="MSSQLSERVER"} 1.728769425e+09
windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xb42f1",type="Service",username="SQLTELEMETRY"} 1.728769431e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfbac",type="Interactive",username="DWM-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfc72",type="Interactive",username="DWM-2"} 1.728769532e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdedd",type="Interactive",username="DWM-1"} 1.728769283e+09
windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdefd",type="Interactive",username="DWM-1"} 1.728769283e+09
```

### Possible values for `type`

- System
- Interactive
- Network
- Batch
- Service
- Proxy
- Unlock
- NetworkCleartext
- NewCredentials
- RemoteInteractive
- CachedInteractive
- CachedRemoteInteractive
- CachedUnlock

## Useful queries
Query the total number of local and remote (i.e. Terminal Services) interactive sessions.
```
count by (type) (windows_logon_session_logon_timestamp_seconds{type=~"Interactive|RemoteInteractive"})
```

## Alerting examples
_This collector doesn’t yet have alerting examples, we would appreciate your help adding them!_
docs/collector.system.md

@@ -16,7 +16,7 @@ None

| Name | Description | Type | Labels |
|------|-------------|------|--------|
| `windows_system_boot_time_timestamp_seconds` | Unix timestamp of last system boot | gauge | None |
| `windows_system_boot_time_timestamp` | Unix timestamp of last system boot | gauge | None |
| `windows_system_context_switches_total` | Total number of [context switches](https://en.wikipedia.org/wiki/Context_switch) | counter | None |
| `windows_system_exception_dispatches_total` | Total exceptions dispatched by the system | counter | None |
| `windows_system_processes` | Number of process contexts currently loaded or running on the operating system | gauge | None |

@@ -41,7 +41,7 @@ windows_system_processes{instance="localhost"}
## Useful queries
Find hosts that have rebooted in the last 24 hours
```
time() - windows_system_boot_time_timestamp_seconds < 86400
time() - windows_system_boot_time_timestamp < 86400
```

## Alerting examples
docs/collector.time.md

@@ -21,16 +21,17 @@ Matching is case-sensitive.

## Metrics

| Name | Description | Type | Labels |
|------|-------------|------|--------|
| `windows_time_clock_frequency_adjustment_ppb_total` | Total adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | counter | None |
| `windows_time_computed_time_offset_seconds` | The absolute time offset between the system clock and the chosen time source, as computed by the W32Time service in microseconds. When a new valid sample is available, the computed time is updated with the time offset indicated by the sample. This time is the actual time offset of the local clock. W32Time initiates clock correction by using this offset and updates the computed time in between samples with the remaining time offset that needs to be applied to the local clock. Clock accuracy can be tracked by using this performance counter with a low polling interval (for example, 256 seconds or less) and looking for the counter value to be smaller than the desired clock accuracy limit. | gauge | None |
| `windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. | gauge | None |
| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None |
| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None |
| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None |
| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None |
| `windows_time_timezone` | Current timezone as reported by the operating system. | gauge | `timezone` |
| Name | Description | Type | Labels |
|------|-------------|------|--------|
| `windows_time_clock_frequency_adjustment` | Adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | gauge | None |
| `windows_time_clock_frequency_adjustment_ppb` | Adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | gauge | None |
| `windows_time_computed_time_offset_seconds` | The absolute time offset between the system clock and the chosen time source, as computed by the W32Time service in microseconds. When a new valid sample is available, the computed time is updated with the time offset indicated by the sample. This time is the actual time offset of the local clock. W32Time initiates clock correction by using this offset and updates the computed time in between samples with the remaining time offset that needs to be applied to the local clock. Clock accuracy can be tracked by using this performance counter with a low polling interval (for example, 256 seconds or less) and looking for the counter value to be smaller than the desired clock accuracy limit. | gauge | None |
| `windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. | gauge | None |
| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None |
| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None |
| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None |
| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None |
| `windows_time_timezone` | Current timezone as reported by the operating system. | gauge | `timezone` |

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
@@ -130,13 +130,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
    }

    c.addressBookOperationsTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"),
        "",

@@ -511,6 +504,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        nil,
    )

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
    }

    return nil
}
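Many collectors in this commit set follow the same refactor, visible here and repeated below for the Certification Authority, AD FS, cache, cpu, DNS, LogicalDisk, and memory collectors: Build() now declares all Prometheus descriptors first and opens the PDH handle last, so nothing OS-facing is touched until the cheap, infallible setup is done. A generic sketch of the pattern; openPerfCounter and perfCounter are hypothetical stand-ins for the exporter's internal pdh.NewCollector and its collector type:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// perfCounter is a stand-in for the exporter's internal pdh collector type.
type perfCounter struct{ object string }

// openPerfCounter is hypothetical; pdh.NewCollector plays this role upstream.
func openPerfCounter(object string) (*perfCounter, error) {
	if object == "" {
		return nil, errors.New("empty perf object")
	}

	return &perfCounter{object: object}, nil
}

type collector struct {
	requestsTotal *prometheus.Desc
	counter       *perfCounter
}

func (c *collector) Build() error {
	// 1. Cheap, infallible setup: declare every metric descriptor.
	c.requestsTotal = prometheus.NewDesc(
		prometheus.BuildFQName("windows", "demo", "requests_total"),
		"Total requests processed", nil, nil,
	)

	// 2. OS-facing setup last, so a failure leaves a fully described collector.
	var err error

	c.counter, err = openPerfCounter("Demo Object")
	if err != nil {
		return fmt.Errorf("failed to create Demo Object collector: %w", err)
	}

	return nil
}

func main() {
	fmt.Println((&collector{}).Build())
}
```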
@@ -82,13 +82,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create Certification Authority collector: %w", err)
    }

    c.requestsPerSecond = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
        "Total certificate requests processed",

@@ -168,6 +161,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        nil,
    )

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create Certification Authority collector: %w", err)
    }

    return nil
}
@@ -112,13 +112,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil)
    if err != nil {
        return fmt.Errorf("failed to create AD FS collector: %w", err)
    }

    c.adLoginConnectionFailures = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
        "Total number of connection failures to an Active Directory domain controller",

@@ -378,6 +371,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        nil,
    )

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil)
    if err != nil {
        return fmt.Errorf("failed to create AD FS collector: %w", err)
    }

    return nil
}
internal/collector/cache/cache.go (14 lines changed, vendored)
@@ -98,13 +98,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create Cache collector: %w", err)
    }

    c.asyncCopyReadsTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
        "(AsyncCopyReadsTotal)",

@@ -280,6 +273,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        nil,
    )

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create Cache collector: %w", err)
    }

    return nil
}
@@ -89,15 +89,8 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    c.mu = sync.Mutex{}

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create Processor Information collector: %w", err)
    }

    c.logicalProcessors = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "logical_processor"),
        "Total number of logical processors",

@@ -186,6 +179,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    c.processorRTCValues = map[string]utils.Counter{}
    c.processorMPerfValues = map[string]utils.Counter{}

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create Processor Information collector: %w", err)
    }

    return nil
}
@@ -75,18 +75,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
    if miSession == nil {
        return errors.New("miSession is nil")
    }

    miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor")
    if err != nil {
        return fmt.Errorf("failed to create WMI query: %w", err)
    }

    c.miQuery = miQuery
    c.miSession = miSession

    c.cpuInfo = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, "", Name),
        "Labelled CPU information as provided by Win32_Processor",

@@ -148,6 +136,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
        nil,
    )

    if miSession == nil {
        return errors.New("miSession is nil")
    }

    miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor")
    if err != nil {
        return fmt.Errorf("failed to create WMI query: %w", err)
    }

    c.miQuery = miQuery
    c.miSession = miSession

    var dst []miProcessor
    if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
        return fmt.Errorf("WMI query failed: %w", err)
@@ -160,29 +160,6 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {

    logger.Info("dfsr collector is in an experimental state! Metrics for this collector have not been tested.")

    var err error

    if slices.Contains(c.config.CollectorsEnabled, "connection") {
        c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
        if err != nil {
            return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
        }
    }

    if slices.Contains(c.config.CollectorsEnabled, "folder") {
        c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
        if err != nil {
            return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
        }
    }

    if slices.Contains(c.config.CollectorsEnabled, "volume") {
        c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
        if err != nil {
            return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
        }
    }

    // connection
    c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),

@@ -473,13 +450,36 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
        nil,
    )

    var err error

    if slices.Contains(c.config.CollectorsEnabled, "connection") {
        c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
        if err != nil {
            return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
        }
    }

    if slices.Contains(c.config.CollectorsEnabled, "folder") {
        c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
        if err != nil {
            return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
        }
    }

    if slices.Contains(c.config.CollectorsEnabled, "volume") {
        c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
        if err != nil {
            return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
        }
    }

    return nil
}

// Collect implements the Collector interface.
// Sends metric values for each metric to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
    errs := make([]error, 0, 3)
    errs := make([]error, 0)

    if slices.Contains(c.config.CollectorsEnabled, "connection") {
        errs = append(errs, c.collectPDHConnection(ch))
@@ -148,12 +148,79 @@ func (c *Collector) Close() error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) {
        c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil)
        if err != nil {
            return fmt.Errorf("failed to create DHCP Server collector: %w", err)
        }
    if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) {
        c.scopeInfo = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_info"),
            "DHCP Scope information",
            []string{"name", "superscope_name", "superscope_id", "scope"},
            nil,
        )

        c.scopeState = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_state"),
            "DHCP Scope state",
            []string{"scope", "state"},
            nil,
        )

        c.scopeAddressesFreeTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free"),
            "DHCP Scope free addresses",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesFreeOnPartnerServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_partner_server"),
            "DHCP Scope free addresses on partner server",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesFreeOnThisServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_this_server"),
            "DHCP Scope free addresses on this server",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesInUseTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use"),
            "DHCP Scope addresses in use",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesInUseOnPartnerServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_partner_server"),
            "DHCP Scope addresses in use on partner server",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesInUseOnThisServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_this_server"),
            "DHCP Scope addresses in use on this server",
            []string{"scope"},
            nil,
        )

        c.scopePendingOffersTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_pending_offers"),
            "DHCP Scope pending offers",
            []string{"scope"},
            nil,
        )

        c.scopeReservedAddressTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_reserved_address"),
            "DHCP Scope reserved addresses",
            []string{"scope"},
            nil,
        )
    }

    if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) {
        c.packetsReceivedTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
            "Total number of packets received by the DHCP server (PacketsReceivedTotal)",

@@ -304,78 +371,11 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
            nil,
            nil,
        )
    }

    if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) {
        c.scopeInfo = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_info"),
            "DHCP Scope information",
            []string{"name", "superscope_name", "superscope_id", "scope"},
            nil,
        )

        c.scopeState = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_state"),
            "DHCP Scope state",
            []string{"scope", "state"},
            nil,
        )

        c.scopeAddressesFreeTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free"),
            "DHCP Scope free addresses",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesFreeOnPartnerServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_partner_server"),
            "DHCP Scope free addresses on partner server",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesFreeOnThisServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_this_server"),
            "DHCP Scope free addresses on this server",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesInUseTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use"),
            "DHCP Scope addresses in use",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesInUseOnPartnerServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_partner_server"),
            "DHCP Scope addresses in use on partner server",
            []string{"scope"},
            nil,
        )

        c.scopeAddressesInUseOnThisServerTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_this_server"),
            "DHCP Scope addresses in use on this server",
            []string{"scope"},
            nil,
        )

        c.scopePendingOffersTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_pending_offers"),
            "DHCP Scope pending offers",
            []string{"scope"},
            nil,
        )

        c.scopeReservedAddressTotal = prometheus.NewDesc(
            prometheus.BuildFQName(types.Namespace, Name, "scope_reserved_address"),
            "DHCP Scope reserved addresses",
            []string{"scope"},
            nil,
        )
        c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil)
        if err != nil {
            return fmt.Errorf("failed to create DHCP Server collector: %w", err)
        }
    }

    return nil
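The DHCP change also illustrates gating whole groups of descriptors behind configuration flags with slices.Contains, so disabled sub-collectors cost nothing at Build time. A condensed sketch of that shape; the type and constant names are illustrative:

```go
package main

import (
	"fmt"
	"slices"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	subCollectorServerMetrics = "server_metrics"
	subCollectorScopeMetrics  = "scope_metrics"
)

type dhcpCollector struct {
	enabled   []string
	scopeInfo *prometheus.Desc
}

func (c *dhcpCollector) build() {
	// Only register scope descriptors when the sub-collector is enabled,
	// mirroring the structure the diff introduces.
	if slices.Contains(c.enabled, subCollectorScopeMetrics) {
		c.scopeInfo = prometheus.NewDesc(
			prometheus.BuildFQName("windows", "dhcp", "scope_info"),
			"DHCP Scope information",
			[]string{"name", "superscope_name", "superscope_id", "scope"},
			nil,
		)
	}
}

func main() {
	c := &dhcpCollector{enabled: []string{subCollectorScopeMetrics}}
	c.build()
	fmt.Println(c.scopeInfo != nil)
}
```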
@@ -72,18 +72,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
    if miSession == nil {
        return errors.New("miSession is nil")
    }

    miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive")
    if err != nil {
        return fmt.Errorf("failed to create WMI query: %w", err)
    }

    c.miQuery = miQuery
    c.miSession = miSession

    c.diskInfo = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "info"),
        "General drive information",

@@ -120,6 +108,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
        nil,
    )

    if miSession == nil {
        return errors.New("miSession is nil")
    }

    miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive")
    if err != nil {
        return fmt.Errorf("failed to create WMI query: %w", err)
    }

    c.miQuery = miQuery
    c.miSession = miSession

    var dst []diskDrive
    if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
        return fmt.Errorf("WMI query failed: %w", err)
@@ -91,13 +91,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create DNS collector: %w", err)
    }

    c.zoneTransferRequestsReceived = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
        "Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",

@@ -231,6 +224,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        nil,
    )

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create DNS collector: %w", err)
    }

    return nil
}
@@ -148,7 +148,7 @@ func (c *Collector) Close() error {
    return nil
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
    c.collectorFns = make([]func(ch chan<- prometheus.Metric) error, 0, len(c.config.CollectorsEnabled))
    c.closeFns = make([]func(), 0, len(c.config.CollectorsEnabled))

@@ -224,9 +224,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
            close: c.perfDataCollectorVirtualNetworkAdapterDropReasons.Close,
        },
        subCollectorVirtualSMB: {
            build:   c.buildVirtualSMB,
            collect: c.collectVirtualSMB,
            close:   c.perfDataCollectorVirtualSMB.Close,
            build:          c.buildVirtualSMB,
            collect:        c.collectVirtualSMB,
            close:          c.perfDataCollectorVirtualSMB.Close,
            minBuildNumber: osversion.LTSC2022,
        },
        subCollectorVirtualStorageDevice: {
            build: c.buildVirtualStorageDevice,

@@ -253,7 +254,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        }

        if buildNumber < subCollectors[name].minBuildNumber {
            errs = append(errs, fmt.Errorf("collector %s requires Windows Server 2022 or newer", name))
            logger.Warn(fmt.Sprintf(
                "collector %s requires windows build version %d. Current build version: %d",
                name, subCollectors[name].minBuildNumber, buildNumber,
            ), slog.String("collector", name))

            continue
        }
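The Hyper-V change above downgrades a too-old OS from a hard error to a warning and simply skips the sub-collector. A standalone sketch of build-number gating with hcsshim's osversion package; the collector name and map layout are illustrative:

```go
//go:build windows

package main

import (
	"fmt"
	"log/slog"

	"github.com/Microsoft/hcsshim/osversion"
)

func main() {
	// Minimum Windows build required by a hypothetical sub-collector;
	// osversion.LTSC2022 is build 20348.
	const name = "virtual_smb"

	minBuild := uint16(osversion.LTSC2022)

	if osversion.Build() < minBuild {
		// Warn and skip instead of failing the whole collector set.
		slog.Warn(fmt.Sprintf(
			"collector %s requires windows build version %d. Current build version: %d",
			name, minBuild, osversion.Build(),
		), slog.String("collector", name))

		return
	}

	fmt.Println("registering", name)
}
```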
@@ -18,6 +18,7 @@ package hyperv
import (
    "fmt"

    "github.com/Microsoft/hcsshim/osversion"
    "github.com/prometheus-community/windows_exporter/internal/pdh"
    "github.com/prometheus-community/windows_exporter/internal/types"
    "github.com/prometheus-community/windows_exporter/internal/utils"

@@ -40,7 +41,7 @@ type perfDataCounterValuesDynamicMemoryBalancer struct {

    // Hyper-V Dynamic Memory Balancer metrics
    VmDynamicMemoryBalancerAvailableMemory             float64 `perfdata:"Available Memory"`
    VmDynamicMemoryBalancerAvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing"`
    VmDynamicMemoryBalancerAvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing" perfdata_min_build:"17763"`
    VmDynamicMemoryBalancerAveragePressure             float64 `perfdata:"Average Pressure"`
    VmDynamicMemoryBalancerSystemCurrentPressure       float64 `perfdata:"System Current Pressure"`
}

@@ -96,12 +97,14 @@ func (c *Collector) collectDynamicMemoryBalancer(ch chan<- prometheus.Metric) er
        data.Name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.vmDynamicMemoryBalancerAvailableMemoryForBalancing,
        prometheus.GaugeValue,
        utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemoryForBalancing),
        data.Name,
    )
    if osversion.Build() >= osversion.LTSC2019 {
        ch <- prometheus.MustNewConstMetric(
            c.vmDynamicMemoryBalancerAvailableMemoryForBalancing,
            prometheus.GaugeValue,
            utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemoryForBalancing),
            data.Name,
        )
    }

    ch <- prometheus.MustNewConstMetric(
        c.vmDynamicMemoryBalancerAveragePressure,
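The new perfdata_min_build:"17763" tag marks counters that do not exist before a given Windows build (17763 is the Windows Server 2019 / LTSC2019 build). How the internal pdh package consumes the tag is not shown in this diff; the reflection sketch below is an assumption about how such a filter can be implemented:

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

type perfDataCounterValues struct {
	AvailableMemory             float64 `perfdata:"Available Memory"`
	AvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing" perfdata_min_build:"17763"`
}

// countersForBuild is a hypothetical reconstruction: it returns the perfdata
// counter names whose minimum-build tag is satisfied by the given build.
func countersForBuild(v any, build int) []string {
	var names []string

	t := reflect.TypeOf(v)
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)

		if tag, ok := f.Tag.Lookup("perfdata_min_build"); ok {
			if minBuild, err := strconv.Atoi(tag); err == nil && build < minBuild {
				continue // counter not present on this OS build
			}
		}

		names = append(names, f.Tag.Get("perfdata"))
	}

	return names
}

func main() {
	fmt.Println(countersForBuild(perfDataCounterValues{}, 14393)) // pre-2019 build
	fmt.Println(countersForBuild(perfDataCounterValues{}, 20348)) // LTSC2022
}
```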
@@ -18,6 +18,7 @@ package hyperv
import (
    "fmt"

    "github.com/Microsoft/hcsshim/osversion"
    "github.com/prometheus-community/windows_exporter/internal/pdh"
    "github.com/prometheus-community/windows_exporter/internal/types"
    "github.com/prometheus-community/windows_exporter/internal/utils"

@@ -47,7 +48,7 @@ type perfDataCounterValuesDynamicMemoryVM struct {
    // Hyper-V Dynamic Memory VM metrics
    VmMemoryAddedMemory                float64 `perfdata:"Added Memory"`
    VmMemoryCurrentPressure            float64 `perfdata:"Current Pressure"`
    VmMemoryGuestAvailableMemory       float64 `perfdata:"Guest Available Memory"`
    VmMemoryGuestAvailableMemory       float64 `perfdata:"Guest Available Memory" perfdata_min_build:"17763"`
    VmMemoryGuestVisiblePhysicalMemory float64 `perfdata:"Guest Visible Physical Memory"`
    VmMemoryMaximumPressure            float64 `perfdata:"Maximum Pressure"`
    VmMemoryMemoryAddOperations        float64 `perfdata:"Memory Add Operations"`

@@ -150,12 +151,14 @@ func (c *Collector) collectDynamicMemoryVM(ch chan<- prometheus.Metric) error {
        data.Name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.vmMemoryGuestAvailableMemory,
        prometheus.GaugeValue,
        utils.MBToBytes(data.VmMemoryGuestAvailableMemory),
        data.Name,
    )
    if osversion.Build() >= osversion.LTSC2019 {
        ch <- prometheus.MustNewConstMetric(
            c.vmMemoryGuestAvailableMemory,
            prometheus.GaugeValue,
            utils.MBToBytes(data.VmMemoryGuestAvailableMemory),
            data.Name,
        )
    }

    ch <- prometheus.MustNewConstMetric(
        c.vmMemoryGuestVisiblePhysicalMemory,
@@ -40,7 +40,7 @@ type collectorHypervisorVirtualProcessor struct {
type perfDataCounterValuesHypervisorVirtualProcessor struct {
    Name string

    HypervisorVirtualProcessorGuestIdleTimePercent     float64 `perfdata:"% Guest Idle Time"`
    HypervisorVirtualProcessorGuestRunTimePercent      float64 `perfdata:"% Guest Run Time"`
    HypervisorVirtualProcessorHypervisorRunTimePercent float64 `perfdata:"% Hypervisor Run Time"`
    HypervisorVirtualProcessorTotalRunTimePercent      float64 `perfdata:"% Total Run Time"`
    HypervisorVirtualProcessorRemoteRunTimePercent     float64 `perfdata:"% Remote Run Time"`

@@ -108,15 +108,15 @@ func (c *Collector) collectHypervisorVirtualProcessor(ch chan<- prometheus.Metri
    ch <- prometheus.MustNewConstMetric(
        c.hypervisorVirtualProcessorTimeTotal,
        prometheus.CounterValue,
        data.HypervisorVirtualProcessorGuestIdleTimePercent,
        vmName, coreID, "guest_idle",
        data.HypervisorVirtualProcessorGuestRunTimePercent,
        vmName, coreID, "guest",
    )

    ch <- prometheus.MustNewConstMetric(
        c.hypervisorVirtualProcessorTimeTotal,
        prometheus.CounterValue,
        data.HypervisorVirtualProcessorGuestIdleTimePercent,
        vmName, coreID, "guest_idle",
        data.HypervisorVirtualProcessorRemoteRunTimePercent,
        vmName, coreID, "remote",
    )

    ch <- prometheus.MustNewConstMetric(
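The bug fixed above, a copy-pasted MustNewConstMetric block that emitted the guest-idle counter under the guest and remote labels, is exactly the kind of slip a table-driven loop avoids. A hedged sketch of that alternative; the descriptor name and sample values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		prometheus.BuildFQName("windows", "hyperv", "hypervisor_virtual_processor_time_total"),
		"Time spent by the virtual processor, split by state",
		[]string{"vm", "core", "state"}, nil,
	)

	// Illustrative sample values for one virtual core.
	vmName, coreID := "vm01", "0"
	byState := []struct {
		state string
		value float64
	}{
		{"guest_idle", 12.5},
		{"guest", 80.0},
		{"hypervisor", 5.0},
		{"remote", 2.5},
	}

	// One loop, one pairing of value and state label: no copy-paste drift.
	for _, s := range byState {
		m := prometheus.MustNewConstMetric(
			desc, prometheus.CounterValue, s.value, vmName, coreID, s.state,
		)
		fmt.Println(m.Desc(), s.state)
	}
}
```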
@@ -167,7 +167,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
        prometheus.Labels{"version": fmt.Sprintf("%d.%d", c.iisVersion.major, c.iisVersion.minor)},
    )

    errs := make([]error, 0, 4)
    errs := make([]error, 0)

    if err := c.buildWebService(); err != nil {
        errs = append(errs, fmt.Errorf("failed to build Web Service collector: %w", err))

@@ -247,7 +247,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
        1,
    )

    errs := make([]error, 0, 4)
    errs := make([]error, 0)

    if err := c.collectWebService(ch); err != nil {
        errs = append(errs, fmt.Errorf("failed to collect Web Service metrics: %w", err))
@@ -150,13 +150,6 @@ func (c *Collector) Close() error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
    c.logger = logger.With(slog.String("collector", Name))

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
    }

    c.information = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "info"),
        "A metric with a constant '1' value labeled with logical disk information",

@@ -281,6 +274,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
        nil,
    )

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
    }

    return nil
}
@@ -34,6 +34,7 @@ type Config struct{}
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for WMI metrics.
|
||||
// Deprecated: Use windows_terminal_services_session_info instead.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
@@ -64,10 +65,16 @@ func (c *Collector) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
|
||||
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
|
||||
logger.Warn("The logon collector will be removed mid 2025. "+
|
||||
"See https://github.com/prometheus-community/windows_exporter/pull/1957 for more information. If you see values in this collector"+
|
||||
" that you need, please open an issue to discuss how to get them into the new collector.",
|
||||
slog.String("collector", Name),
|
||||
)
|
||||
|
||||
c.sessionInfo = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "session_logon_timestamp_seconds"),
|
||||
"timestamp of the logon session in seconds.",
|
||||
"Deprecated. Use windows_terminal_services_session_info instead.",
|
||||
[]string{"id", "username", "domain", "type"},
|
||||
nil,
|
||||
)
|
||||
|
||||
@@ -110,13 +110,6 @@ func (c *Collector) Close() error {
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Memory collector: %w", err)
|
||||
}
|
||||
|
||||
c.availableBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "available_bytes"),
|
||||
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
|
||||
@@ -340,13 +333,20 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
|
||||
nil,
|
||||
)
|
||||
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Memory collector: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
|
||||
errs := make([]error, 0, 2)
|
||||
errs := make([]error, 0)
|
||||
|
||||
if err := c.collectPDH(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed collecting memory metrics: %w", err))
|
||||
@@ -390,6 +390,8 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
|
||||
err := c.perfDataCollector.Collect(&c.perfDataObject)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect Memory metrics: %w", err)
|
||||
} else if len(c.perfDataObject) == 0 {
|
||||
return fmt.Errorf("failed to collect Memory metrics: %w", types.ErrNoDataUnexpected)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
|
||||
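Note: the memory hunks apply the same reordering as logical_disk and msmq (infallible descriptor setup first, the fallible pdh.NewCollector call moved to just before return) and additionally treat a collect that succeeds but returns zero instances as an error. A small, hedged sketch of that guard; errNoData stands in for the diff's types.ErrNoDataUnexpected:

    package main

    import (
        "errors"
        "fmt"
    )

    // errNoData plays the role of types.ErrNoDataUnexpected above.
    var errNoData = errors.New("no data returned, expected at least one instance")

    func collect(rows []float64) error {
        if len(rows) == 0 {
            // A query that succeeds but yields no instances is still a failed
            // scrape; surfacing it keeps the collector's success signal honest.
            return fmt.Errorf("failed to collect Memory metrics: %w", errNoData)
        }

        return nil
    }

    func main() {
        fmt.Println(collect(nil)) // failed to collect Memory metrics: ...
    }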
@@ -122,7 +122,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {

    c.miSession = miSession

    errs := make([]error, 0, 5)
    errs := make([]error, 0)

    if slices.Contains(c.config.CollectorsEnabled, subCollectorCluster) {
        if err := c.buildCluster(); err != nil {
@@ -227,7 +227,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
    wg.Wait()
    close(errCh)

    errs := make([]error, 0, 5)
    errs := make([]error, 0)

    for err := range errCh {
        errs = append(errs, err)

@@ -74,13 +74,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
    }

    c.bytesInJournalQueue = prometheus.NewDesc(
        prometheus.BuildFQName(types.Namespace, Name, "bytes_in_journal_queue"),
        "Size of queue journal in bytes",
@@ -106,6 +99,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
        nil,
    )

    var err error

    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
    if err != nil {
        return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
    }

    return nil
}


@@ -333,7 +333,7 @@ func (c *Collector) getMSSQLInstances() ([]mssqlInstance, error) {
        return nil, fmt.Errorf("couldn't get instance info: %w", err)
    }

    instance, err := newMssqlInstance(instanceVersion)
    instance, err := newMssqlInstance(instanceName, instanceVersion)
    if err != nil {
        return nil, err
    }
@@ -348,14 +348,14 @@ func (c *Collector) getMSSQLInstances() ([]mssqlInstance, error) {

// mssqlGetPerfObjectName returns the name of the Windows Performance
// Counter object for the given SQL instance and Collector.
func (c *Collector) mssqlGetPerfObjectName(sqlInstance string, collector string) string {
func (c *Collector) mssqlGetPerfObjectName(sqlInstance mssqlInstance, collector string) string {
    sb := strings.Builder{}

    if sqlInstance == "MSSQLSERVER" {
    if sqlInstance.isFirstInstance {
        sb.WriteString("SQLServer:")
    } else {
        sb.WriteString("MSSQL$")
        sb.WriteString(sqlInstance)
        sb.WriteString(sqlInstance.name)
        sb.WriteString(":")
    }

@@ -369,8 +369,8 @@ func (c *Collector) mssqlGetPerfObjectName(sqlInstance string, collector string)
func (c *Collector) collect(
    ch chan<- prometheus.Metric,
    collector string,
    perfDataCollectors map[string]*pdh.Collector,
    collectFn func(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error,
    perfDataCollectors map[mssqlInstance]*pdh.Collector,
    collectFn func(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error,
) error {
    errs := make([]error, 0, len(perfDataCollectors))

@@ -386,11 +386,11 @@ func (c *Collector) collect(
            errs = append(errs, err)
            success = 0.0

            c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance, duration),
            c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance.name, duration),
                slog.Any("err", err),
            )
        } else {
            c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance, duration))
            c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance.name, duration))
        }

        if collector == "" {
@@ -401,13 +401,13 @@ func (c *Collector) collect(
            c.mssqlScrapeDurationDesc,
            prometheus.GaugeValue,
            duration.Seconds(),
            collector, sqlInstance,
            collector, sqlInstance.name,
        )
        ch <- prometheus.MustNewConstMetric(
            c.mssqlScrapeSuccessDesc,
            prometheus.GaugeValue,
            success,
            collector, sqlInstance,
            collector, sqlInstance.name,
        )
    }

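Note: from here on, every per-instance collector map is re-keyed from the instance name (a string) to the mssqlInstance value itself. This works because a Go struct whose fields are all comparable is itself comparable and therefore usable as a map key, and it gives callers the name, the isFirstInstance flag, and the version in one place. A hedged sketch; the exact field set of mssqlInstance is not shown in this diff, so the fields below are assumptions:

    package main

    import "fmt"

    // Assumed shape: the diff references name, isFirstInstance, and a version
    // check (isVersionGreaterOrEqualThan); only these usages are certain.
    type mssqlInstance struct {
        name            string
        isFirstInstance bool
        majorVersion    int
    }

    // perfObjectName mirrors the naming rule from mssqlGetPerfObjectName above:
    // the first (default) instance uses the "SQLServer:" prefix, while named
    // instances use "MSSQL$<name>:".
    func perfObjectName(inst mssqlInstance, collector string) string {
        if inst.isFirstInstance {
            return "SQLServer:" + collector
        }

        return "MSSQL$" + inst.name + ":" + collector
    }

    func main() {
        collectors := make(map[mssqlInstance]string)

        def := mssqlInstance{name: "MSSQLSERVER", isFirstInstance: true, majorVersion: 16}
        named := mssqlInstance{name: "SQLEXPRESS", majorVersion: 15}

        collectors[def] = perfObjectName(def, "Buffer Manager")     // SQLServer:Buffer Manager
        collectors[named] = perfObjectName(named, "Buffer Manager") // MSSQL$SQLEXPRESS:Buffer Manager

        for inst, obj := range collectors {
            fmt.Printf("%s -> %s\n", inst.name, obj)
        }
    }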
@@ -25,7 +25,7 @@ import (
)

type collectorAccessMethods struct {
    accessMethodsPerfDataCollectors map[string]*pdh.Collector
    accessMethodsPerfDataCollectors map[mssqlInstance]*pdh.Collector
    accessMethodsPerfDataObject     []perfDataCounterValuesAccessMethods

    accessMethodsAUcleanupbatches *prometheus.Desc
@@ -124,11 +124,11 @@ type perfDataCounterValuesAccessMethods struct {
func (c *Collector) buildAccessMethods() error {
    var err error

    c.accessMethodsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
    c.accessMethodsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
    errs := make([]error, 0, len(c.mssqlInstances))

    for _, sqlInstance := range c.mssqlInstances {
        c.accessMethodsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Access Methods"), nil)
        c.accessMethodsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Access Methods"), nil)
        if err != nil {
            errs = append(errs, fmt.Errorf("failed to create AccessMethods collector for instance %s: %w", sqlInstance.name, err))
        }
@@ -407,7 +407,7 @@ func (c *Collector) collectAccessMethods(ch chan<- prometheus.Metric) error {
    return c.collect(ch, subCollectorAccessMethods, c.accessMethodsPerfDataCollectors, c.collectAccessMethodsInstance)
}

func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
    err := perfDataCollector.Collect(&c.accessMethodsPerfDataObject)
    if err != nil {
        return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"), err)
@@ -417,308 +417,308 @@ func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sq
        c.accessMethodsAUcleanupbatches,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupbatchesPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsAUcleanups,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsByReferenceLobCreateCount,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobCreateCount,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsByReferenceLobUseCount,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobUseCount,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsCountLobReadahead,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsCountLobReadahead,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsCountPullInRow,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsCountPullInRow,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsCountPushOffRow,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsCountPushOffRow,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsDeferreddroppedAUs,
        prometheus.GaugeValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedAUs,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsDeferredDroppedrowsets,
        prometheus.GaugeValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedRowsets,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsDroppedrowsetcleanups,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetCleanupsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsDroppedrowsetsskipped,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetsSkippedPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsExtentDeallocations,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsExtentDeallocationsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsExtentsAllocated,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsExtentsAllocatedPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsFailedAUcleanupbatches,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsFailedAUCleanupBatchesPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsFailedleafpagecookie,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsFailedLeafPageCookie,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsFailedtreepagecookie,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsFailedTreePageCookie,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsForwardedRecords,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsForwardedRecordsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsFreeSpacePageFetches,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpacePageFetchesPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsFreeSpaceScans,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpaceScansPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsFullScans,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsFullScansPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsIndexSearches,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsIndexSearchesPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsInSysXactwaits,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsInSysXactWaitsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsLobHandleCreateCount,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleCreateCount,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsLobHandleDestroyCount,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleDestroyCount,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsLobSSProviderCreateCount,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderCreateCount,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsLobSSProviderDestroyCount,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderDestroyCount,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsLobSSProviderTruncationCount,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderTruncationCount,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsMixedPageAllocations,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsMixedPageAllocationsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsPageCompressionAttempts,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsPageCompressionAttemptsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsPageDeallocations,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsPageDeallocationsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsPagesAllocated,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsPagesAllocatedPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsPagesCompressed,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsPagesCompressedPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsPageSplits,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsPageSplitsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsProbeScans,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsProbeScansPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsRangeScans,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsRangeScansPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsScanPointRevalidations,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsScanPointRevalidationsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsSkippedGhostedRecords,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsSkippedGhostedRecordsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsTableLockEscalations,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsTableLockEscalationsPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsUsedleafpagecookie,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsUsedLeafPageCookie,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsUsedtreepagecookie,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsUsedTreePageCookie,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsWorkfilesCreated,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsWorkfilesCreatedPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsWorktablesCreated,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesCreatedPerSec,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsWorktablesFromCacheHits,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatio,
        sqlInstance,
        sqlInstance.name,
    )

    ch <- prometheus.MustNewConstMetric(
        c.accessMethodsWorktablesFromCacheLookups,
        prometheus.CounterValue,
        c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatioBase,
        sqlInstance,
        sqlInstance.name,
    )

    return nil

@@ -26,7 +26,7 @@ import (
)

type collectorAvailabilityReplica struct {
    availabilityReplicaPerfDataCollectors map[string]*pdh.Collector
    availabilityReplicaPerfDataCollectors map[mssqlInstance]*pdh.Collector
    availabilityReplicaPerfDataObject     []perfDataCounterValuesAvailabilityReplica

    availReplicaBytesReceivedFromReplica *prometheus.Desc
@@ -57,11 +57,11 @@ type perfDataCounterValuesAvailabilityReplica struct {
func (c *Collector) buildAvailabilityReplica() error {
    var err error

    c.availabilityReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
    c.availabilityReplicaPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
    errs := make([]error, 0, len(c.mssqlInstances))

    for _, sqlInstance := range c.mssqlInstances {
        c.availabilityReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Availability Replica"), pdh.InstancesAll)
        c.availabilityReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), pdh.InstancesAll)
        if err != nil {
            errs = append(errs, fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance.name, err))
        }
@@ -130,7 +130,7 @@ func (c *Collector) collectAvailabilityReplica(ch chan<- prometheus.Metric) erro
    return c.collect(ch, subCollectorAvailabilityReplica, c.availabilityReplicaPerfDataCollectors, c.collectAvailabilityReplicaInstance)
}

func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
    err := perfDataCollector.Collect(&c.availabilityReplicaPerfDataObject)
    if err != nil {
        return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), err)
@@ -141,63 +141,63 @@ func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metr
            c.availReplicaBytesReceivedFromReplica,
            prometheus.CounterValue,
            data.AvailReplicaBytesReceivedFromReplicaPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.availReplicaBytesSentToReplica,
            prometheus.CounterValue,
            data.AvailReplicaBytesSentToReplicaPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.availReplicaBytesSentToTransport,
            prometheus.CounterValue,
            data.AvailReplicaBytesSentToTransportPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.availReplicaFlowControl,
            prometheus.CounterValue,
            data.AvailReplicaFlowControlPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.availReplicaFlowControlTimeMS,
            prometheus.CounterValue,
            utils.MilliSecToSec(data.AvailReplicaFlowControlTimeMSPerSec),
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.availReplicaReceivesFromReplica,
            prometheus.CounterValue,
            data.AvailReplicaReceivesFromReplicaPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.availReplicaResentMessages,
            prometheus.CounterValue,
            data.AvailReplicaResentMessagesPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.availReplicaSendsToReplica,
            prometheus.CounterValue,
            data.AvailReplicaSendsToReplicaPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.availReplicaSendsToTransport,
            prometheus.CounterValue,
            data.AvailReplicaSendsToTransportPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )
    }


@@ -25,7 +25,7 @@ import (
)

type collectorBufferManager struct {
    bufManPerfDataCollectors map[string]*pdh.Collector
    bufManPerfDataCollectors map[mssqlInstance]*pdh.Collector
    bufManPerfDataObject     []perfDataCounterValuesBufMan

    bufManBackgroundwriterpages *prometheus.Desc
@@ -82,11 +82,11 @@ type perfDataCounterValuesBufMan struct {
func (c *Collector) buildBufferManager() error {
    var err error

    c.bufManPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
    c.bufManPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
    errs := make([]error, 0, len(c.mssqlInstances))

    for _, sqlInstance := range c.mssqlInstances {
        c.bufManPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesBufMan](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Buffer Manager"), nil)
        c.bufManPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesBufMan](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), nil)
        if err != nil {
            errs = append(errs, fmt.Errorf("failed to create Buffer Manager collector for instance %s: %w", sqlInstance.name, err))
        }
@@ -238,7 +238,7 @@ func (c *Collector) collectBufferManager(ch chan<- prometheus.Metric) error {
    return c.collect(ch, subCollectorBufferManager, c.bufManPerfDataCollectors, c.collectBufferManagerInstance)
}

func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
    err := perfDataCollector.Collect(&c.bufManPerfDataObject)
    if err != nil {
        return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), err)
@@ -249,161 +249,161 @@ func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sq
            c.bufManBackgroundwriterpages,
            prometheus.CounterValue,
            data.BufManBackgroundWriterPagesPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManBuffercachehits,
            prometheus.GaugeValue,
            data.BufManBufferCacheHitRatio,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManBuffercachelookups,
            prometheus.GaugeValue,
            data.BufManBufferCacheHitRatioBase,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManCheckpointpages,
            prometheus.CounterValue,
            data.BufManCheckpointPagesPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManDatabasepages,
            prometheus.GaugeValue,
            data.BufManDatabasePages,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManExtensionallocatedpages,
            prometheus.GaugeValue,
            data.BufManExtensionAllocatedPages,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManExtensionfreepages,
            prometheus.GaugeValue,
            data.BufManExtensionFreePages,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManExtensioninuseaspercentage,
            prometheus.GaugeValue,
            data.BufManExtensionInUseAsPercentage,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManExtensionoutstandingIOcounter,
            prometheus.GaugeValue,
            data.BufManExtensionOutstandingIOCounter,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManExtensionpageevictions,
            prometheus.CounterValue,
            data.BufManExtensionPageEvictionsPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManExtensionpagereads,
            prometheus.CounterValue,
            data.BufManExtensionPageReadsPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManExtensionpageunreferencedtime,
            prometheus.GaugeValue,
            data.BufManExtensionPageUnreferencedTime,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManExtensionpagewrites,
            prometheus.CounterValue,
            data.BufManExtensionPageWritesPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManFreeliststalls,
            prometheus.CounterValue,
            data.BufManFreeListStallsPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManIntegralControllerSlope,
            prometheus.GaugeValue,
            data.BufManIntegralControllerSlope,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManLazywrites,
            prometheus.CounterValue,
            data.BufManLazyWritesPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManPagelifeexpectancy,
            prometheus.GaugeValue,
            data.BufManPageLifeExpectancy,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManPagelookups,
            prometheus.CounterValue,
            data.BufManPageLookupsPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManPagereads,
            prometheus.CounterValue,
            data.BufManPageReadsPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManPagewrites,
            prometheus.CounterValue,
            data.BufManPageWritesPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManReadaheadpages,
            prometheus.CounterValue,
            data.BufManReadaheadPagesPerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManReadaheadtime,
            prometheus.CounterValue,
            data.BufManReadaheadTimePerSec,
            sqlInstance,
            sqlInstance.name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.bufManTargetpages,
            prometheus.GaugeValue,
            data.BufManTargetPages,
            sqlInstance,
            sqlInstance.name,
        )
    }


@@ -25,8 +25,8 @@ import (
)

type collectorDatabases struct {
    databasesPerfDataCollectors     map[string]*pdh.Collector
    databasesPerfDataCollectors2019 map[string]*pdh.Collector
    databasesPerfDataCollectors     map[mssqlInstance]*pdh.Collector
    databasesPerfDataCollectors2019 map[mssqlInstance]*pdh.Collector
    databasesPerfDataObject         []perfDataCounterValuesDatabases
    databasesPerfDataObject2019     []perfDataCounterValuesDatabases2019

@@ -141,18 +141,18 @@ type perfDataCounterValuesDatabases2019 struct {
func (c *Collector) buildDatabases() error {
    var err error

    c.databasesPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
    c.databasesPerfDataCollectors2019 = make(map[string]*pdh.Collector, len(c.mssqlInstances))
    c.databasesPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
    c.databasesPerfDataCollectors2019 = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
    errs := make([]error, 0, len(c.mssqlInstances))

    for _, sqlInstance := range c.mssqlInstances {
        c.databasesPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll)
        c.databasesPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
        if err != nil {
            errs = append(errs, fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance.name, err))
        }

        if sqlInstance.isVersionGreaterOrEqualThan(serverVersion2019) {
            c.databasesPerfDataCollectors2019[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll)
            c.databasesPerfDataCollectors2019[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
            if err != nil {
                errs = append(errs, fmt.Errorf("failed to create Databases 2019 collector for instance %s: %w", sqlInstance.name, err))
            }
@@ -458,7 +458,7 @@ func (c *Collector) collectDatabases(ch chan<- prometheus.Metric) error {
    )
}

func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
    err := perfDataCollector.Collect(&c.databasesPerfDataObject)
    if err != nil {
        return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)
@@ -469,336 +469,336 @@ func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlIns
            c.databasesActiveTransactions,
            prometheus.GaugeValue,
            data.DatabasesActiveTransactions,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesBackupPerRestoreThroughput,
            prometheus.CounterValue,
            data.DatabasesBackupPerRestoreThroughputPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesBulkCopyRows,
            prometheus.CounterValue,
            data.DatabasesBulkCopyRowsPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesBulkCopyThroughput,
            prometheus.CounterValue,
            data.DatabasesBulkCopyThroughputPerSec*1024,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesCommitTableEntries,
            prometheus.GaugeValue,
            data.DatabasesCommitTableEntries,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesDataFilesSizeKB,
            prometheus.GaugeValue,
            data.DatabasesDataFilesSizeKB*1024,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesDBCCLogicalScanBytes,
            prometheus.CounterValue,
            data.DatabasesDBCCLogicalScanBytesPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesGroupCommitTime,
            prometheus.CounterValue,
            data.DatabasesGroupCommitTimePerSec/1000000.0,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogBytesFlushed,
            prometheus.CounterValue,
            data.DatabasesLogBytesFlushedPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogCacheHits,
            prometheus.GaugeValue,
            data.DatabasesLogCacheHitRatio,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogCacheLookups,
            prometheus.GaugeValue,
            data.DatabasesLogCacheHitRatioBase,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogCacheReads,
            prometheus.CounterValue,
            data.DatabasesLogCacheReadsPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogFilesSizeKB,
            prometheus.GaugeValue,
            data.DatabasesLogFilesSizeKB*1024,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogFilesUsedSizeKB,
            prometheus.GaugeValue,
            data.DatabasesLogFilesUsedSizeKB*1024,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogFlushes,
            prometheus.CounterValue,
            data.DatabasesLogFlushesPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogFlushWaits,
            prometheus.CounterValue,
            data.DatabasesLogFlushWaitsPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogFlushWaitTime,
            prometheus.GaugeValue,
            data.DatabasesLogFlushWaitTime/1000.0,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogFlushWriteTimeMS,
            prometheus.GaugeValue,
            data.DatabasesLogFlushWriteTimeMS/1000.0,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogGrowths,
            prometheus.GaugeValue,
            data.DatabasesLogGrowths,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolCacheMisses,
            prometheus.CounterValue,
            data.DatabasesLogPoolCacheMissesPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolDiskReads,
            prometheus.CounterValue,
            data.DatabasesLogPoolDiskReadsPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolHashDeletes,
            prometheus.CounterValue,
            data.DatabasesLogPoolHashDeletesPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolHashInserts,
            prometheus.CounterValue,
            data.DatabasesLogPoolHashInsertsPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolInvalidHashEntry,
            prometheus.CounterValue,
            data.DatabasesLogPoolInvalidHashEntryPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolLogScanPushes,
            prometheus.CounterValue,
            data.DatabasesLogPoolLogScanPushesPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolLogWriterPushes,
            prometheus.CounterValue,
            data.DatabasesLogPoolLogWriterPushesPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolPushEmptyFreePool,
            prometheus.CounterValue,
            data.DatabasesLogPoolPushEmptyFreePoolPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolPushLowMemory,
            prometheus.CounterValue,
            data.DatabasesLogPoolPushLowMemoryPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolPushNoFreeBuffer,
            prometheus.CounterValue,
            data.DatabasesLogPoolPushNoFreeBufferPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolReqBehindTrunc,
            prometheus.CounterValue,
            data.DatabasesLogPoolReqBehindTruncPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolRequestsOldVLF,
            prometheus.CounterValue,
            data.DatabasesLogPoolRequestsOldVLFPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolRequests,
            prometheus.CounterValue,
            data.DatabasesLogPoolRequestsPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolTotalActiveLogSize,
            prometheus.GaugeValue,
            data.DatabasesLogPoolTotalActiveLogSize,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogPoolTotalSharedPoolSize,
            prometheus.GaugeValue,
            data.DatabasesLogPoolTotalSharedPoolSize,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogShrinks,
            prometheus.GaugeValue,
            data.DatabasesLogShrinks,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesLogTruncations,
            prometheus.GaugeValue,
            data.DatabasesLogTruncations,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesPercentLogUsed,
            prometheus.GaugeValue,
            data.DatabasesPercentLogUsed,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesReplPendingXacts,
            prometheus.GaugeValue,
            data.DatabasesReplPendingXacts,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesReplTransRate,
            prometheus.CounterValue,
            data.DatabasesReplTransRate,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesShrinkDataMovementBytes,
            prometheus.CounterValue,
            data.DatabasesShrinkDataMovementBytesPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesTrackedTransactions,
            prometheus.CounterValue,
            data.DatabasesTrackedTransactionsPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesTransactions,
            prometheus.CounterValue,
            data.DatabasesTransactionsPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesWriteTransactions,
            prometheus.CounterValue,
            data.DatabasesWriteTransactionsPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesXTPControllerDLCLatencyPerFetch,
            prometheus.GaugeValue,
            data.DatabasesXTPControllerDLCLatencyPerFetch,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesXTPControllerDLCPeakLatency,
            prometheus.GaugeValue,
            data.DatabasesXTPControllerDLCPeakLatency*1000000.0,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesXTPControllerLogProcessed,
            prometheus.CounterValue,
            data.DatabasesXTPControllerLogProcessedPerSec,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.databasesXTPMemoryUsedKB,
            prometheus.GaugeValue,
            data.DatabasesXTPMemoryUsedKB*1024,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )
    }

    return nil
}

func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
    err := perfDataCollector.Collect(&c.databasesPerfDataObject2019)
    if err != nil {
        return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)
@@ -809,7 +809,7 @@ func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sq
            c.databasesActiveParallelRedoThreads,
            prometheus.GaugeValue,
            data.DatabasesActiveParallelRedoThreads,
            sqlInstance, data.Name,
            sqlInstance.name, data.Name,
        )
    }

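Note: the databases hunk keeps two collector maps because a handful of counters (for example "Active parallel redo threads") only exist on SQL Server 2019 and later, and registration is gated per instance via isVersionGreaterOrEqualThan(serverVersion2019). A hedged sketch of that gating; SQL Server 2019 reports major version 15, but the constant and field names below are assumptions for illustration:

    package main

    import "fmt"

    type instance struct {
        name         string
        majorVersion int
    }

    const serverVersion2019 = 15 // SQL Server 2019 is version 15.x

    func main() {
        instances := []instance{
            {name: "MSSQLSERVER", majorVersion: 16},
            {name: "LEGACY2017", majorVersion: 14},
        }

        base := make(map[instance]bool)
        only2019 := make(map[instance]bool)

        for _, inst := range instances {
            base[inst] = true

            // 2019-only counter sets are registered only where they exist, so
            // older instances never query counters their version lacks.
            if inst.majorVersion >= serverVersion2019 {
                only2019[inst] = true
            }
        }

        fmt.Println(len(base), len(only2019)) // 2 1
    }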
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
type collectorDatabaseReplica struct {
|
||||
dbReplicaPerfDataCollectors map[string]*pdh.Collector
|
||||
dbReplicaPerfDataCollectors map[mssqlInstance]*pdh.Collector
|
||||
dbReplicaPerfDataObject []perfDataCounterValuesDBReplica
|
||||
|
||||
dbReplicaDatabaseFlowControlDelay *prometheus.Desc
|
||||
@@ -86,11 +86,11 @@ type perfDataCounterValuesDBReplica struct {
|
||||
func (c *Collector) buildDatabaseReplica() error {
|
||||
var err error
|
||||
|
||||
c.dbReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
|
||||
c.dbReplicaPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
|
||||
errs := make([]error, 0, len(c.mssqlInstances))
|
||||
|
||||
for _, sqlInstance := range c.mssqlInstances {
|
||||
c.dbReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDBReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Database Replica"), pdh.InstancesAll)
|
||||
c.dbReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDBReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), pdh.InstancesAll)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance.name, err))
|
||||
}
|
||||
@@ -249,7 +249,7 @@ func (c *Collector) collectDatabaseReplica(ch chan<- prometheus.Metric) error {
|
||||
return c.collect(ch, subCollectorDatabaseReplica, c.dbReplicaPerfDataCollectors, c.collectDatabaseReplicaInstance)
|
||||
}
|
||||
|
||||
func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
|
||||
func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
|
||||
err := perfDataCollector.Collect(&c.dbReplicaPerfDataObject)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), err)
|
||||
@@ -260,168 +260,168 @@ func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric,
|
||||
c.dbReplicaDatabaseFlowControlDelay,
|
||||
prometheus.GaugeValue,
|
||||
data.DbReplicaDatabaseFlowControlDelay,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dbReplicaDatabaseFlowControls,
|
||||
prometheus.CounterValue,
|
||||
data.DbReplicaDatabaseFlowControlsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dbReplicaFileBytesReceived,
|
||||
prometheus.CounterValue,
|
||||
data.DbReplicaFileBytesReceivedPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dbReplicaGroupCommits,
|
||||
prometheus.CounterValue,
|
||||
data.DbReplicaGroupCommitsPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dbReplicaGroupCommitTime,
|
||||
prometheus.GaugeValue,
|
||||
data.DbReplicaGroupCommitTime,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dbReplicaLogApplyPendingQueue,
|
||||
prometheus.GaugeValue,
|
||||
data.DbReplicaLogApplyPendingQueue,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dbReplicaLogApplyReadyQueue,
|
||||
prometheus.GaugeValue,
|
||||
data.DbReplicaLogApplyReadyQueue,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dbReplicaLogBytesCompressed,
|
||||
prometheus.CounterValue,
|
||||
data.DbReplicaLogBytesCompressedPerSec,
|
||||
sqlInstance, data.Name,
|
||||
sqlInstance.name, data.Name,
|
||||
)
|
||||
|
||||
 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaLogBytesDecompressed,
 			prometheus.CounterValue,
 			data.DbReplicaLogBytesDecompressedPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaLogBytesReceived,
 			prometheus.CounterValue,
 			data.DbReplicaLogBytesReceivedPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaLogCompressionCachehits,
 			prometheus.CounterValue,
 			data.DbReplicaLogCompressionCacheHitsPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaLogCompressionCachemisses,
 			prometheus.CounterValue,
 			data.DbReplicaLogCompressionCacheMissesPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaLogCompressions,
 			prometheus.CounterValue,
 			data.DbReplicaLogCompressionsPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaLogDecompressions,
 			prometheus.CounterValue,
 			data.DbReplicaLogDecompressionsPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaLogremainingforundo,
 			prometheus.GaugeValue,
 			data.DbReplicaLogRemainingForUndo,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaLogSendQueue,
 			prometheus.GaugeValue,
 			data.DbReplicaLogSendQueue,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaMirroredWritetransactions,
 			prometheus.CounterValue,
 			data.DbReplicaMirroredWriteTransactionsPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaRecoveryQueue,
 			prometheus.GaugeValue,
 			data.DbReplicaRecoveryQueue,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaRedoblocked,
 			prometheus.CounterValue,
 			data.DbReplicaRedoBlockedPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaRedoBytesRemaining,
 			prometheus.GaugeValue,
 			data.DbReplicaRedoBytesRemaining,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaRedoneBytes,
 			prometheus.CounterValue,
 			data.DbReplicaRedoneBytesPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaRedones,
 			prometheus.CounterValue,
 			data.DbReplicaRedonesPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaTotalLogrequiringundo,
 			prometheus.GaugeValue,
 			data.DbReplicaTotalLogRequiringUndo,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.dbReplicaTransactionDelay,
 			prometheus.GaugeValue,
 			data.DbReplicaTransactionDelay/1000.0,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)
 	}

@@ -25,7 +25,7 @@ import (
 )

 type collectorGeneralStatistics struct {
-	genStatsPerfDataCollectors map[string]*pdh.Collector
+	genStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector
 	genStatsPerfDataObject     []perfDataCounterValuesGenStats

 	genStatsActiveTempTables *prometheus.Desc
@@ -84,11 +84,11 @@ type perfDataCounterValuesGenStats struct {
 func (c *Collector) buildGeneralStatistics() error {
 	var err error

-	c.genStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.genStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
 	errs := make([]error, 0, len(c.mssqlInstances))

 	for _, sqlInstance := range c.mssqlInstances {
-		c.genStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesGenStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "General Statistics"), nil)
+		c.genStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesGenStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), nil)
 		if err != nil {
 			errs = append(errs, fmt.Errorf("failed to create General Statistics collector for instance %s: %w", sqlInstance.name, err))
 		}
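Editorial note on the pattern above: the collectors can switch their map keys from plain strings to `mssqlInstance` values because Go accepts any struct whose fields are all comparable types as a map key. A minimal, self-contained sketch (the `instance` type below is a trimmed stand-in, not the repository's type):

```go
package main

import "fmt"

// instance is a trimmed stand-in for mssqlInstance: every field is a
// comparable type, so the struct itself is a valid map key.
type instance struct {
	name            string
	isFirstInstance bool
}

func main() {
	collectors := make(map[instance]string)
	collectors[instance{name: "SQLEXPRESS"}] = "General Statistics"

	// Lookup compares all fields of the key, field by field.
	fmt.Println(collectors[instance{name: "SQLEXPRESS"}]) // General Statistics
}
```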
@@ -247,7 +247,7 @@ func (c *Collector) collectGeneralStatistics(ch chan<- prometheus.Metric) error
 	return c.collect(ch, subCollectorGeneralStatistics, c.genStatsPerfDataCollectors, c.collectGeneralStatisticsInstance)
 }

-func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
 	err := perfDataCollector.Collect(&c.genStatsPerfDataObject)
 	if err != nil {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), err)
@@ -257,168 +257,168 @@ func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric
 		c.genStatsActiveTempTables,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsActiveTempTables,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsConnectionReset,
 		prometheus.CounterValue,
 		c.genStatsPerfDataObject[0].GenStatsConnectionResetPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsEventNotificationsDelayedDrop,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsEventNotificationsDelayedDrop,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsHTTPAuthenticatedRequests,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsHTTPAuthenticatedRequests,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsLogicalConnections,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsLogicalConnections,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsLogins,
 		prometheus.CounterValue,
 		c.genStatsPerfDataObject[0].GenStatsLoginsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsLogouts,
 		prometheus.CounterValue,
 		c.genStatsPerfDataObject[0].GenStatsLogoutsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsMarsDeadlocks,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsMarsDeadlocks,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsNonAtomicYieldRate,
 		prometheus.CounterValue,
 		c.genStatsPerfDataObject[0].GenStatsNonatomicYieldRate,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsProcessesBlocked,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsProcessesBlocked,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsSOAPEmptyRequests,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsSOAPEmptyRequests,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsSOAPMethodInvocations,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsSOAPMethodInvocations,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsSOAPSessionInitiateRequests,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsSOAPSessionInitiateRequests,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsSOAPSessionTerminateRequests,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsSOAPSessionTerminateRequests,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsSOAPSQLRequests,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsSOAPSQLRequests,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsSOAPWSDLRequests,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsSOAPWSDLRequests,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsSQLTraceIOProviderLockWaits,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsSQLTraceIOProviderLockWaits,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsTempDBRecoveryUnitID,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsTempdbRecoveryUnitID,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsTempDBrowSetID,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsTempdbRowsetID,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsTempTablesCreationRate,
 		prometheus.CounterValue,
 		c.genStatsPerfDataObject[0].GenStatsTempTablesCreationRate,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsTempTablesForDestruction,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsTempTablesForDestruction,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsTraceEventNotificationQueue,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsTraceEventNotificationQueue,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsTransactions,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsTransactions,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.genStatsUserConnections,
 		prometheus.GaugeValue,
 		c.genStatsPerfDataObject[0].GenStatsUserConnections,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	return nil

@@ -25,7 +25,7 @@ import (
 )

 type collectorLocks struct {
-	locksPerfDataCollectors map[string]*pdh.Collector
+	locksPerfDataCollectors map[mssqlInstance]*pdh.Collector
 	locksPerfDataObject     []perfDataCounterValuesLocks

 	// Win32_PerfRawData_{instance}_SQLServerLocks
@@ -55,11 +55,11 @@ type perfDataCounterValuesLocks struct {
 func (c *Collector) buildLocks() error {
 	var err error

-	c.locksPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.locksPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
 	errs := make([]error, 0, len(c.mssqlInstances))

 	for _, sqlInstance := range c.mssqlInstances {
-		c.locksPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesLocks](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Locks"), pdh.InstancesAll)
+		c.locksPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesLocks](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Locks"), pdh.InstancesAll)
 		if err != nil {
 			errs = append(errs, fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance.name, err))
 		}
@@ -121,7 +121,7 @@ func (c *Collector) collectLocks(ch chan<- prometheus.Metric) error {
 	return c.collect(ch, subCollectorLocks, c.locksPerfDataCollectors, c.collectLocksInstance)
 }

-func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
 	err := perfDataCollector.Collect(&c.locksPerfDataObject)
 	if err != nil {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Locks"), err)
@@ -132,56 +132,56 @@ func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstanc
 			c.locksWaitTime,
 			prometheus.GaugeValue,
 			data.LocksAverageWaitTimeMS/1000.0,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.locksCount,
 			prometheus.GaugeValue,
 			data.LocksAverageWaitTimeMSBase/1000.0,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.locksLockRequests,
 			prometheus.CounterValue,
 			data.LocksLockRequestsPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.locksLockTimeouts,
 			prometheus.CounterValue,
 			data.LocksLockTimeoutsPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.locksLockTimeoutstimeout0,
 			prometheus.CounterValue,
 			data.LocksLockTimeoutsTimeout0PerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.locksLockWaits,
 			prometheus.CounterValue,
 			data.LocksLockWaitsPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.locksLockWaitTimeMS,
 			prometheus.GaugeValue,
 			data.LocksLockWaitTimeMS/1000.0,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.locksNumberOfDeadlocks,
 			prometheus.CounterValue,
 			data.LocksNumberOfDeadlocksPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)
 	}

@@ -25,7 +25,7 @@ import (
 )

 type collectorMemoryManager struct {
-	memMgrPerfDataCollectors map[string]*pdh.Collector
+	memMgrPerfDataCollectors map[mssqlInstance]*pdh.Collector
 	memMgrPerfDataObject     []perfDataCounterValuesMemMgr

 	memMgrConnectionMemoryKB *prometheus.Desc
@@ -76,11 +76,11 @@ type perfDataCounterValuesMemMgr struct {
 func (c *Collector) buildMemoryManager() error {
 	var err error

-	c.memMgrPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.memMgrPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
 	errs := make([]error, 0, len(c.mssqlInstances))

 	for _, sqlInstance := range c.mssqlInstances {
-		c.memMgrPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesMemMgr](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Memory Manager"), pdh.InstancesAll)
+		c.memMgrPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesMemMgr](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), pdh.InstancesAll)
 		if err != nil {
 			errs = append(errs, fmt.Errorf("failed to create Memory Manager collector for instance %s: %w", sqlInstance.name, err))
 		}
@@ -214,7 +214,7 @@ func (c *Collector) collectMemoryManager(ch chan<- prometheus.Metric) error {
 	return c.collect(ch, subCollectorMemoryManager, c.memMgrPerfDataCollectors, c.collectMemoryManagerInstance)
 }

-func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
 	err := perfDataCollector.Collect(&c.memMgrPerfDataObject)
 	if err != nil {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), err)
@@ -224,140 +224,140 @@ func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sq
 		c.memMgrConnectionMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrConnectionMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrDatabaseCacheMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrDatabaseCacheMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrExternalBenefitOfMemory,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrExternalBenefitOfMemory,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrFreeMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrFreeMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrGrantedWorkspaceMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrGrantedWorkspaceMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrLockBlocks,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrLockBlocks,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrLockBlocksAllocated,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrLockBlocksAllocated,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrLockMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrLockMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrLockOwnerBlocks,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocks,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrLockOwnerBlocksAllocated,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocksAllocated,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrLogPoolMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrLogPoolMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrMaximumWorkspaceMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrMaximumWorkspaceMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrMemoryGrantsOutstanding,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrMemoryGrantsOutstanding,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrMemoryGrantsPending,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrMemoryGrantsPending,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrOptimizerMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrOptimizerMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrReservedServerMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrReservedServerMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrSQLCacheMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrSQLCacheMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrStolenServerMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrStolenServerMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrTargetServerMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrTargetServerMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.memMgrTotalServerMemoryKB,
 		prometheus.GaugeValue,
 		c.memMgrPerfDataObject[0].MemMgrTotalServerMemoryKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	return nil

@@ -25,7 +25,7 @@ import (
 )

 type collectorSQLErrors struct {
-	sqlErrorsPerfDataCollectors map[string]*pdh.Collector
+	sqlErrorsPerfDataCollectors map[mssqlInstance]*pdh.Collector
 	sqlErrorsPerfDataObject     []perfDataCounterValuesSqlErrors

 	// Win32_PerfRawData_{instance}_SQLServerSQLErrors
@@ -41,11 +41,11 @@ type perfDataCounterValuesSqlErrors struct {
 func (c *Collector) buildSQLErrors() error {
 	var err error

-	c.sqlErrorsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.sqlErrorsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
 	errs := make([]error, 0, len(c.mssqlInstances))

 	for _, sqlInstance := range c.mssqlInstances {
-		c.sqlErrorsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Errors"), pdh.InstancesAll)
+		c.sqlErrorsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), pdh.InstancesAll)
 		if err != nil {
 			errs = append(errs, fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance.name, err))
 		}
@@ -66,7 +66,7 @@ func (c *Collector) collectSQLErrors(ch chan<- prometheus.Metric) error {
 	return c.collect(ch, subCollectorSQLErrors, c.sqlErrorsPerfDataCollectors, c.collectSQLErrorsInstance)
 }

-func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
 	err := perfDataCollector.Collect(&c.sqlErrorsPerfDataObject)
 	if err != nil {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), err)
@@ -77,7 +77,7 @@ func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlIns
 			c.sqlErrorsTotal,
 			prometheus.CounterValue,
 			data.SqlErrorsErrorsPerSec,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)
 	}

@@ -25,7 +25,7 @@ import (
 )

 type collectorSQLStats struct {
-	sqlStatsPerfDataCollectors map[string]*pdh.Collector
+	sqlStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector
 	sqlStatsPerfDataObject     []perfDataCounterValuesSqlStats

 	sqlStatsAutoParamAttempts *prometheus.Desc
@@ -58,11 +58,11 @@ type perfDataCounterValuesSqlStats struct {
 func (c *Collector) buildSQLStats() error {
 	var err error

-	c.sqlStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.sqlStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
 	errs := make([]error, 0, len(c.mssqlInstances))

 	for _, sqlInstance := range c.mssqlInstances {
-		c.sqlStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Statistics"), nil)
+		c.sqlStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), nil)
 		if err != nil {
 			errs = append(errs, fmt.Errorf("failed to create SQL Statistics collector for instance %s: %w", sqlInstance.name, err))
 		}
@@ -142,7 +142,7 @@ func (c *Collector) collectSQLStats(ch chan<- prometheus.Metric) error {
 	return c.collect(ch, subCollectorSQLStats, c.sqlStatsPerfDataCollectors, c.collectSQLStatsInstance)
 }

-func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
 	err := perfDataCollector.Collect(&c.sqlStatsPerfDataObject)
 	if err != nil {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), err)
@@ -152,77 +152,77 @@ func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInst
 		c.sqlStatsAutoParamAttempts,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsAutoParamAttemptsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsBatchRequests,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsBatchRequestsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsFailedAutoParams,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsFailedAutoParamsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsForcedParameterizations,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsForcedParameterizationsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsGuidedplanexecutions,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsGuidedplanexecutionsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsMisguidedplanexecutions,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsMisguidedplanexecutionsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsSafeAutoParams,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsSafeAutoParamsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsSQLAttentionrate,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsSQLAttentionrate,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsSQLCompilations,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsSQLCompilationsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsSQLReCompilations,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsSQLReCompilationsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.sqlStatsUnsafeAutoParams,
 		prometheus.CounterValue,
 		c.sqlStatsPerfDataObject[0].SqlStatsUnsafeAutoParamsPerSec,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	return nil

@@ -25,7 +25,7 @@ import (
 )

 type collectorTransactions struct {
-	transactionsPerfDataCollectors map[string]*pdh.Collector
+	transactionsPerfDataCollectors map[mssqlInstance]*pdh.Collector
 	transactionsPerfDataObject     []perfDataCounterValuesTransactions

 	transactionsTempDbFreeSpaceBytes *prometheus.Desc
@@ -62,11 +62,11 @@ type perfDataCounterValuesTransactions struct {
 func (c *Collector) buildTransactions() error {
 	var err error

-	c.transactionsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.transactionsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
 	errs := make([]error, 0, len(c.mssqlInstances))

 	for _, sqlInstance := range c.mssqlInstances {
-		c.transactionsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesTransactions](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Transactions"), nil)
+		c.transactionsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesTransactions](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), nil)
 		if err != nil {
 			errs = append(errs, fmt.Errorf("failed to create Transactions collector for instance %s: %w", sqlInstance.name, err))
 		}
@@ -160,7 +160,7 @@ func (c *Collector) collectTransactions(ch chan<- prometheus.Metric) error {

 // Win32_PerfRawData_MSSQLSERVER_Transactions docs:
 // - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
-func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
 	err := perfDataCollector.Collect(&c.transactionsPerfDataObject)
 	if err != nil {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), err)
@@ -170,91 +170,91 @@ func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sql
 		c.transactionsTempDbFreeSpaceBytes,
 		prometheus.GaugeValue,
 		c.transactionsPerfDataObject[0].TransactionsFreeSpaceintempdbKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsLongestTransactionRunningSeconds,
 		prometheus.GaugeValue,
 		c.transactionsPerfDataObject[0].TransactionsLongestTransactionRunningTime,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsNonSnapshotVersionActiveTotal,
 		prometheus.CounterValue,
 		c.transactionsPerfDataObject[0].TransactionsNonSnapshotVersionTransactions,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsSnapshotActiveTotal,
 		prometheus.CounterValue,
 		c.transactionsPerfDataObject[0].TransactionsSnapshotTransactions,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsActive,
 		prometheus.GaugeValue,
 		c.transactionsPerfDataObject[0].TransactionsTransactions,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsUpdateConflictsTotal,
 		prometheus.CounterValue,
 		c.transactionsPerfDataObject[0].TransactionsUpdateconflictratio,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsUpdateSnapshotActiveTotal,
 		prometheus.CounterValue,
 		c.transactionsPerfDataObject[0].TransactionsUpdateSnapshotTransactions,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsVersionCleanupRateBytes,
 		prometheus.GaugeValue,
 		c.transactionsPerfDataObject[0].TransactionsVersionCleanuprateKBPers*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsVersionGenerationRateBytes,
 		prometheus.GaugeValue,
 		c.transactionsPerfDataObject[0].TransactionsVersionGenerationrateKBPers*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsVersionStoreSizeBytes,
 		prometheus.GaugeValue,
 		c.transactionsPerfDataObject[0].TransactionsVersionStoreSizeKB*1024,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsVersionStoreUnits,
 		prometheus.CounterValue,
 		c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcount,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsVersionStoreCreationUnits,
 		prometheus.CounterValue,
 		c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcreation,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	ch <- prometheus.MustNewConstMetric(
 		c.transactionsVersionStoreTruncationUnits,
 		prometheus.CounterValue,
 		c.transactionsPerfDataObject[0].TransactionsVersionStoreunittruncation,
-		sqlInstance,
+		sqlInstance.name,
 	)

 	return nil

@@ -25,7 +25,7 @@ import (
 )

 type collectorWaitStats struct {
-	waitStatsPerfDataCollectors map[string]*pdh.Collector
+	waitStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector
 	waitStatsPerfDataObject     []perfDataCounterValuesWaitStats

 	waitStatsLockWaits *prometheus.Desc
@@ -62,11 +62,11 @@ type perfDataCounterValuesWaitStats struct {
 func (c *Collector) buildWaitStats() error {
 	var err error

-	c.waitStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances))
+	c.waitStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances))
 	errs := make([]error, 0, len(c.mssqlInstances))

 	for _, sqlInstance := range c.mssqlInstances {
-		c.waitStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesWaitStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Wait Statistics"), pdh.InstancesAll)
+		c.waitStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesWaitStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), pdh.InstancesAll)
 		if err != nil {
 			errs = append(errs, fmt.Errorf("failed to create Wait Statistics collector for instance %s: %w", sqlInstance.name, err))
 		}
@@ -153,7 +153,7 @@ func (c *Collector) collectWaitStats(ch chan<- prometheus.Metric) error {
 	return c.collect(ch, subCollectorWaitStats, c.waitStatsPerfDataCollectors, c.collectWaitStatsInstance)
 }

-func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error {
+func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error {
 	err := perfDataCollector.Collect(&c.waitStatsPerfDataObject)
 	if err != nil {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), err)
@@ -164,84 +164,84 @@ func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlIns
 			c.waitStatsLockWaits,
 			prometheus.CounterValue,
 			data.WaitStatsLockWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsMemoryGrantQueueWaits,
 			prometheus.CounterValue,
 			data.WaitStatsMemoryGrantQueueWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsThreadSafeMemoryObjectsWaits,
 			prometheus.CounterValue,
 			data.WaitStatsThreadSafeMemoryObjectsWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsLogWriteWaits,
 			prometheus.CounterValue,
 			data.WaitStatsLogWriteWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsLogBufferWaits,
 			prometheus.CounterValue,
 			data.WaitStatsLogBufferWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsNetworkIOWaits,
 			prometheus.CounterValue,
 			data.WaitStatsNetworkIOWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsPageIOLatchWaits,
 			prometheus.CounterValue,
 			data.WaitStatsPageIOLatchWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsPageLatchWaits,
 			prometheus.CounterValue,
 			data.WaitStatsPageLatchWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsNonPageLatchWaits,
 			prometheus.CounterValue,
 			data.WaitStatsNonpageLatchWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsWaitForTheWorkerWaits,
 			prometheus.CounterValue,
 			data.WaitStatsWaitForTheWorkerWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsWorkspaceSynchronizationWaits,
 			prometheus.CounterValue,
 			data.WaitStatsWorkspaceSynchronizationWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.waitStatsTransactionOwnershipWaits,
 			prometheus.CounterValue,
 			data.WaitStatsTransactionOwnershipWaits,
-			sqlInstance, data.Name,
+			sqlInstance.name, data.Name,
 		)
 	}

@@ -8,13 +8,14 @@ import (
 )

 type mssqlInstance struct {
-	name         string
-	majorVersion mssqlServerMajorVersion
-	patchVersion string
-	edition      string
+	name            string
+	majorVersion    mssqlServerMajorVersion
+	patchVersion    string
+	edition         string
+	isFirstInstance bool
 }

-func newMssqlInstance(name string) (mssqlInstance, error) {
+func newMssqlInstance(key, name string) (mssqlInstance, error) {
 	regKey := fmt.Sprintf(`Software\Microsoft\Microsoft SQL Server\%s\Setup`, name)

 	k, err := registry.OpenKey(registry.LOCAL_MACHINE, regKey, registry.QUERY_VALUE)
@@ -39,10 +40,11 @@ func newMssqlInstance(name string) (mssqlInstance, error) {
 	_, name, _ = strings.Cut(name, ".")

 	return mssqlInstance{
-		edition:      edition,
-		name:         name,
-		majorVersion: newMajorVersion(patchVersion),
-		patchVersion: patchVersion,
+		edition:         edition,
+		name:            name,
+		majorVersion:    newMajorVersion(patchVersion),
+		patchVersion:    patchVersion,
+		isFirstInstance: key == "MSSQLSERVER",
 	}, nil
 }
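For context: `isFirstInstance` records whether the registry key named the default `MSSQLSERVER` instance. That distinction matters for performance counter object names, which differ between the default and named instances on Windows. The fragment below illustrates the documented naming convention; the repository's actual `mssqlGetPerfObjectName` is not part of this diff and may be implemented differently:

```go
// perfObjectName is a hypothetical helper: the default SQL Server instance
// exposes counters as "SQLServer:<object>", while a named instance exposes
// them as "MSSQL$<name>:<object>".
func perfObjectName(inst mssqlInstance, object string) string {
	if inst.isFirstInstance {
		return "SQLServer:" + object
	}

	return "MSSQL$" + inst.name + ":" + object
}
```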
@@ -33,7 +33,12 @@ import (
 	"golang.org/x/sys/windows"
 )

-const Name = "net"
+const (
+	Name = "net"
+
+	subCollectorMetrics = "metrics"
+	subCollectorNicInfo = "nic_addresses"
+)

 type Config struct {
 	NicExclude *regexp.Regexp `yaml:"nic_exclude"`
@@ -46,8 +51,8 @@ var ConfigDefaults = Config{
 	NicExclude: types.RegExpEmpty,
 	NicInclude: types.RegExpAny,
 	CollectorsEnabled: []string{
-		"metrics",
-		"nic_addresses",
+		subCollectorMetrics,
+		subCollectorNicInfo,
 	},
 }

@@ -157,17 +162,12 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Network Interface", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create Network Interface collector: %w", err)
-	}
-
-	if slices.Contains(c.config.CollectorsEnabled, "addresses") {
-		logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.",
-			slog.String("collector", Name),
-		)
+	for _, collector := range c.config.CollectorsEnabled {
+		if !slices.Contains([]string{subCollectorMetrics, subCollectorNicInfo}, collector) {
+			return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
+				strings.Join([]string{subCollectorMetrics, subCollectorNicInfo}, ", "),
+			)
+		}
 	}

 	c.bytesReceivedTotal = prometheus.NewDesc(
@@ -261,21 +261,34 @@
 		nil,
 	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Network Interface", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create Network Interface collector: %w", err)
+	}
+
+	if slices.Contains(c.config.CollectorsEnabled, subCollectorNicInfo) {
+		logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.",
+			slog.String("collector", Name),
+		)
+	}
+
 	return nil
 }

 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-	errs := make([]error, 0, 2)
+	errs := make([]error, 0)

-	if slices.Contains(c.config.CollectorsEnabled, "metrics") {
+	if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
 		if err := c.collect(ch); err != nil {
 			errs = append(errs, fmt.Errorf("failed collecting metrics: %w", err))
 		}
 	}

-	if slices.Contains(c.config.CollectorsEnabled, "nic_addresses") {
+	if slices.Contains(c.config.CollectorsEnabled, subCollectorNicInfo) {
 		if err := c.collectNICAddresses(ch); err != nil {
 			errs = append(errs, fmt.Errorf("failed collecting net addresses: %w", err))
 		}

@@ -94,20 +94,6 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	errs := make([]error, 0, 2)
-
-	c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil)
-	if err != nil {
-		errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
-	}
-
-	c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil)
-	if err != nil {
-		errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
-	}
-
 	c.accessAccepts = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "access_accepts"),
 		"(AccessAccepts)",
@@ -260,13 +246,27 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		nil,
 	)

+	var err error
+
+	errs := make([]error, 0)
+
+	c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil)
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
+	}
+
+	c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil)
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
+	}
+
 	return errors.Join(errs...)
 }

 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-	errs := make([]error, 0, 2)
+	errs := make([]error, 0)

 	if err := c.collectAccept(ch); err != nil {
 		errs = append(errs, fmt.Errorf("failed collecting NPS accept data: %w", err))
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"log/slog"
 	"strconv"
+	"strings"
 	"time"

 	"github.com/alecthomas/kingpin/v2"
@@ -52,7 +53,7 @@ type Collector struct {
 	processesLimit *prometheus.Desc

 	// users
-	// Deprecated: Use count(windows_logon_logon_type) instead.
+	// Deprecated: Use `sum(windows_terminal_services_session_info{state="active"})` instead.
 	users *prometheus.Desc

 	// physicalMemoryFreeBytes
@@ -105,7 +106,7 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
-	logger.Warn("The os collect holds a number of deprecated metrics and will be removed mid 2025. "+
+	logger.Warn("The os collector holds a number of deprecated metrics and will be removed mid 2025. "+
 		"See https://github.com/prometheus-community/windows_exporter/pull/1596 for more information.",
 		slog.String("collector", Name),
 	)
@@ -117,6 +118,11 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {

 	version := windows.RtlGetVersion()

+	// Microsoft has decided to keep the major version as "10" for Windows 11, including the product name.
+	if version.BuildNumber >= 22000 {
+		productName = strings.Replace(productName, " 10 ", " 11 ", 1)
+	}
+
 	c.osInformation = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "info"),
 		`Contains full product name & version in labels. Note that the "major_version" for Windows 11 is \"10\"; a build number greater than 22000 represents Windows 11.`,
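The build-number check above is the only reliable signal for Windows 11, since RtlGetVersion keeps reporting major version 10. A standalone sketch of the same product-name rewrite (the sample values are assumed, not read from a live system):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed sample values; the exporter reads these from the registry
	// and from windows.RtlGetVersion.
	productName := "Windows 10 Pro"
	buildNumber := uint32(22631)

	// Windows 11 still reports major version 10; builds >= 22000 are Windows 11.
	if buildNumber >= 22000 {
		productName = strings.Replace(productName, " 10 ", " 11 ", 1)
	}

	fmt.Println(productName) // Windows 11 Pro
}
```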
@@ -174,7 +180,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 	)
 	c.users = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "users"),
-		"Deprecated: Use `count(windows_logon_logon_type)` instead.",
+		"Deprecated: Use `sum(windows_terminal_services_session_info{state=\"active\"})` instead.",
 		nil,
 		nil,
 	)
@@ -203,7 +209,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-	errs := make([]error, 0, 4)
+	errs := make([]error, 0)

 	c.collect(ch)

@@ -371,5 +377,5 @@ func (c *Collector) getWindowsVersion() (string, string, error) {
 		return "", "", err
 	}

-	return productName, strconv.FormatUint(revision, 10), nil
+	return strings.TrimSpace(productName), strconv.FormatUint(revision, 10), nil
 }
@@ -74,13 +74,6 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create Paging File collector: %w", err)
-	}
-
 	c.pagingLimitBytes = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "limit_bytes"),
 		"Number of bytes that can be stored in the operating system paging files. 0 (zero) indicates that there are no paging files",
@@ -95,6 +88,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		nil,
 	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create Paging File collector: %w", err)
+	}
+
 	return nil
 }

@@ -127,13 +127,6 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
-	}
-
 	c.requestsQueued = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
 		"The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength)",
@@ -218,6 +211,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		nil,
 	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
+	}
+
 	return nil
 }

@@ -126,25 +126,6 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
-	if miSession == nil {
-		return errors.New("miSession is nil")
-	}
-
-	miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer")
-	if err != nil {
-		return fmt.Errorf("failed to create WMI query: %w", err)
-	}
-
-	c.miQueryPrinter = miQuery
-
-	miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob")
-	if err != nil {
-		return fmt.Errorf("failed to create WMI query: %w", err)
-	}
-
-	c.miQueryPrinterJobs = miQuery
-	c.miSession = miSession
-
 	c.printerJobStatus = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "job_status"),
 		"A counter of printer jobs by status",
@@ -164,6 +145,25 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
 		nil,
 	)

+	if miSession == nil {
+		return errors.New("miSession is nil")
+	}
+
+	miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer")
+	if err != nil {
+		return fmt.Errorf("failed to create WMI query: %w", err)
+	}
+
+	c.miQueryPrinter = miQuery
+
+	miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob")
+	if err != nil {
+		return fmt.Errorf("failed to create WMI query: %w", err)
+	}
+
+	c.miQueryPrinterJobs = miQuery
+	c.miSession = miSession
+
 	return nil
 }

@@ -102,18 +102,6 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(*slog.Logger, *mi.Session) error {
-	var err error
-
-	c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create RemoteFX Network collector: %w", err)
-	}
-
-	c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err)
-	}
-
 	// net
 	c.baseTCPRTT = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"),
@@ -238,13 +226,27 @@ func (c *Collector) Build(*slog.Logger, *mi.Session) error {
 		nil,
 	)

-	return nil
+	var err error
+
+	errs := make([]error, 0)
+
+	c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll)
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to create RemoteFX Network collector: %w", err))
+	}
+
+	c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll)
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err))
+	}
+
+	return errors.Join(errs...)
 }

 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-	errs := make([]error, 0, 2)
+	errs := make([]error, 0)

 	if err := c.collectRemoteFXNetworkCount(ch); err != nil {
 		errs = append(errs, fmt.Errorf("failed collecting RemoteFX Network metrics: %w", err))

@@ -366,9 +366,9 @@ func (c *Collector) collectService(ch chan<- prometheus.Metric, serviceName stri
 // This is realized by ask Service Manager directly.
 func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) {
 	var (
-		bytesNeeded      uint32
-		servicesReturned uint32
-		err              error
+		additionalBytesNeeded uint32
+		servicesReturned      uint32
+		err                   error
 	)

 	for {
@@ -381,7 +381,7 @@ func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, e
 			windows.SERVICE_STATE_ALL,
 			&c.queryAllServicesBuffer[0],
 			currentBufferSize,
-			&bytesNeeded,
+			&additionalBytesNeeded,
 			&servicesReturned,
 			nil,
 			nil,
@@ -395,11 +395,14 @@ func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, e
 			return nil, err
 		}

-		if bytesNeeded <= currentBufferSize {
-			return nil, fmt.Errorf("windows.EnumServicesStatusEx reports buffer too small (%d), but buffer is large enough (%d)", currentBufferSize, bytesNeeded)
-		}
+		/*
+			Unlike other WIN32 API calls, additionalBytesNeeded is not returning the absolute amount bytes needed,
+			but the additional bytes needed relative to the cbBufSize parameter.
+			ref: https://stackoverflow.com/questions/14756347/when-calling-enumservicesstatusex-twice-i-still-get-eror-more-data-in-c
+		*/

-		c.queryAllServicesBuffer = make([]byte, bytesNeeded)
+		c.queryAllServicesBuffer = make([]byte, currentBufferSize+additionalBytesNeeded)
 	}

 	if servicesReturned == 0 {
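The subtlety this hunk fixes: on ERROR_MORE_DATA, EnumServicesStatusEx reports the bytes needed *in addition to* the buffer it was just given, not an absolute size, so the retry buffer must grow to `current + additional`. A minimal, runnable sketch of the resulting grow-and-retry loop, with a hypothetical `enumerate` stub standing in for the real syscall:

```go
package main

import (
	"errors"
	"fmt"
)

var errMoreData = errors.New("ERROR_MORE_DATA")

// enumerate is a hypothetical stub: like EnumServicesStatusEx, on
// errMoreData it reports how many bytes are needed *on top of* len(buf).
func enumerate(buf []byte, total int) (uint32, error) {
	if len(buf) < total {
		return uint32(total - len(buf)), errMoreData
	}

	return 0, nil
}

func main() {
	const totalBytes = 4096 // assumed size of the full enumeration result

	buf := make([]byte, 1024)

	for {
		additional, err := enumerate(buf, totalBytes)
		if err == nil {
			break
		}

		if !errors.Is(err, errMoreData) {
			panic(err)
		}

		// Grow relative to the current size, mirroring the fix above.
		buf = make([]byte, uint32(len(buf))+additional)
	}

	fmt.Println(len(buf)) // 4096
}
```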

@@ -76,13 +76,6 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)
-	}
-
 	c.currentOpenFileCount = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "server_shares_current_open_file_count"),
 		"Current total count open files on the SMB Server Share",
@@ -132,6 +125,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		nil,
 	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)
+	}
+
 	return nil
 }

@@ -91,13 +91,6 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create SMB Client Shares collector: %w", err)
-	}
-
 	// desc creates a new prometheus description
 	desc := func(metricName string, description string, labels []string) *prometheus.Desc {
 		return prometheus.NewDesc(
@@ -193,6 +186,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		[]string{"server", "share"},
 	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create SMB Client Shares collector: %w", err)
+	}
+
 	return nil
 }

@@ -157,13 +157,6 @@ func (c *Collector) Close() error {
 }

 func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
-	var err error
-
-	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll)
-	if err != nil {
-		return fmt.Errorf("failed to create SMTP Server collector: %w", err)
-	}
-
 	logger.Info("smtp collector is in an experimental state! Metrics for this collector have not been tested.",
 		slog.String("collector", Name),
 	)
@@ -421,6 +414,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 		nil,
 	)

+	var err error
+
+	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll)
+	if err != nil {
+		return fmt.Errorf("failed to create SMTP Server collector: %w", err)
+	}
+
 	return nil
 }

@@ -18,8 +18,10 @@ package system
import (
	"fmt"
	"log/slog"
	"time"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
	"github.com/prometheus-community/windows_exporter/internal/mi"
	"github.com/prometheus-community/windows_exporter/internal/pdh"
	"github.com/prometheus-community/windows_exporter/internal/types"
@@ -37,6 +39,8 @@ var ConfigDefaults = Config{}
type Collector struct {
	config Config

	bootTimeTimestamp float64

	perfDataCollector *pdh.Collector
	perfDataObject []perfDataCounterValues

@@ -46,8 +50,10 @@ type Collector struct {
	processes *prometheus.Desc
	processesLimit *prometheus.Desc
	systemCallsTotal *prometheus.Desc
	bootTime *prometheus.Desc
	threads *prometheus.Desc
	// Deprecated: Use windows_system_boot_time_timestamp instead
	bootTimeSeconds *prometheus.Desc
	bootTime *prometheus.Desc
	threads *prometheus.Desc
}

func New(config *Config) *Collector {
@@ -77,19 +83,18 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil)
	if err != nil {
		return fmt.Errorf("failed to create System collector: %w", err)
	}

	c.bootTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"),
		prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp"),
		"Unix timestamp of system boot time",
		nil,
		nil,
	)
	c.bootTimeSeconds = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"),
		"Deprecated: Use windows_system_boot_time_timestamp instead",
		nil,
		nil,
	)
	c.contextSwitchesTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "context_switches_total"),
		"Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)",
@@ -134,6 +139,15 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

	c.bootTimeTimestamp = float64(time.Now().Unix() - int64(kernel32.GetTickCount64()/1000))

	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil)
	if err != nil {
		return fmt.Errorf("failed to create System collector: %w", err)
	}

	return nil
}

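The new `bootTimeTimestamp` field is computed once in Build as "now minus uptime": `kernel32.GetTickCount64()` returns the milliseconds elapsed since boot, so dividing by 1000 and subtracting from the current Unix time yields the boot timestamp. A standalone sketch of the arithmetic, with a stand-in for the kernel32 call:

```go
package main

import (
	"fmt"
	"time"
)

// getTickCount64 stands in for kernel32.GetTickCount64, which returns
// the number of milliseconds elapsed since the system was started.
func getTickCount64() uint64 { return 86_400_000 } // pretend the host has been up for one day

func main() {
	// Boot time = current time minus uptime (ticks are milliseconds).
	bootTimestamp := time.Now().Unix() - int64(getTickCount64()/1000)

	fmt.Println("booted at:", time.Unix(bootTimestamp, 0).UTC())
}
```
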
@@ -170,17 +184,24 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
		prometheus.CounterValue,
		c.perfDataObject[0].SystemCallsPerSec,
	)
	ch <- prometheus.MustNewConstMetric(
		c.bootTime,
		prometheus.GaugeValue,
		c.perfDataObject[0].SystemUpTime,
	)
	ch <- prometheus.MustNewConstMetric(
		c.threads,
		prometheus.GaugeValue,
		c.perfDataObject[0].Threads,
	)

	ch <- prometheus.MustNewConstMetric(
		c.bootTimeSeconds,
		prometheus.GaugeValue,
		c.bootTimeTimestamp,
	)

	ch <- prometheus.MustNewConstMetric(
		c.bootTime,
		prometheus.GaugeValue,
		c.bootTimeTimestamp,
	)

	// Windows has no defined limit; it is based on available resources. This currently isn't calculated by WMI and is set to a default value.
	// https://techcommunity.microsoft.com/t5/windows-blog-archive/pushing-the-limits-of-windows-processes-and-threads/ba-p/723824
	// https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-operatingsystem

@@ -20,7 +20,6 @@ type perfDataCounterValues struct {
	ExceptionDispatchesPerSec float64 `perfdata:"Exception Dispatches/sec"`
	ProcessorQueueLength float64 `perfdata:"Processor Queue Length"`
	SystemCallsPerSec float64 `perfdata:"System Calls/sec"`
	SystemUpTime float64 `perfdata:"System Up Time"`
	Processes float64 `perfdata:"Processes"`
	Threads float64 `perfdata:"Threads"`
}

@@ -118,18 +118,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv4", nil)
	if err != nil {
		return fmt.Errorf("failed to create TCPv4 collector: %w", err)
	}

	c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv6", nil)
	if err != nil {
		return fmt.Errorf("failed to create TCPv6 collector: %w", err)
	}

	c.connectionFailures = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "connection_failures_total"),
		"(TCP.ConnectionFailures)",
@@ -190,13 +178,25 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		[]string{"af", "state"}, nil,
	)

	var err error

	c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv4", nil)
	if err != nil {
		return fmt.Errorf("failed to create TCPv4 collector: %w", err)
	}

	c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv6", nil)
	if err != nil {
		return fmt.Errorf("failed to create TCPv6 collector: %w", err)
	}

	return nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	errs := make([]error, 0, 2)
	errs := make([]error, 0)

	if slices.Contains(c.config.CollectorsEnabled, "metrics") {
		if err := c.collect(ch); err != nil {

@@ -133,25 +133,8 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
	if miSession == nil {
		return errors.New("miSession is nil")
	}

	c.logger = logger.With(slog.String("collector", Name))

	c.connectionBrokerEnabled = isConnectionBrokerServer(miSession)

	if c.connectionBrokerEnabled {
		var err error

		c.perfDataCollectorBroker, err = pdh.NewCollector[perfDataCounterValuesBroker](pdh.CounterTypeRaw, "Remote Desktop Connection Broker Counterset", pdh.InstancesAll)
		if err != nil {
			return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err)
		}
	} else {
		logger.Debug("host is not a connection broker, skipping Connection Broker performance metrics.")
	}

	c.sessionInfo = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "session_info"),
		"Terminal Services sessions info",
@@ -243,8 +226,23 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
		nil,
	)

	if miSession == nil {
		return errors.New("miSession is nil")
	}

	var err error

	c.connectionBrokerEnabled = isConnectionBrokerServer(miSession)

	if c.connectionBrokerEnabled {
		c.perfDataCollectorBroker, err = pdh.NewCollector[perfDataCounterValuesBroker](pdh.CounterTypeRaw, "Remote Desktop Connection Broker Counterset", pdh.InstancesAll)
		if err != nil {
			return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err)
		}
	} else {
		logger.Debug("host is not a connection broker, skipping Connection Broker performance metrics.")
	}

	c.hServer, err = wtsapi32.WTSOpenServer("")
	if err != nil {
		return fmt.Errorf("failed to open WTS server: %w", err)
@@ -261,7 +259,7 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	errs := make([]error, 0, 3)
	errs := make([]error, 0)

	if err := c.collectWTSSessions(ch); err != nil {
		errs = append(errs, fmt.Errorf("failed collecting terminal services session infos: %w", err))
@@ -439,7 +437,7 @@ func (c *Collector) collectWTSSessions(ch chan<- prometheus.Metric) error {
	for _, session := range sessions {
		// only collect metrics for remote named sessions
		n := strings.ReplaceAll(session.SessionName, "#", " ")
		if n == "" || n == "Services" || n == "Console" {
		if n == "" || n == "Services" {
			continue
		}

@@ -16,6 +16,7 @@
package textfile_test

import (
	"context"
	"fmt"
	"io"
	"log/slog"
@@ -44,7 +45,7 @@ func TestMultipleDirectories(t *testing.T) {
	})

	collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
	require.NoError(t, collectors.Build(logger))
	require.NoError(t, collectors.Build(context.Background(), logger))

	metrics := make(chan prometheus.Metric)
	got := ""
@@ -81,7 +82,7 @@ func TestDuplicateFileName(t *testing.T) {
	})

	collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
	require.NoError(t, collectors.Build(logger))
	require.NoError(t, collectors.Build(context.Background(), logger))

	metrics := make(chan prometheus.Metric)
	got := ""

@@ -70,13 +70,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Thermal Zone Information", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err)
	}

	c.temperature = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "temperature_celsius"),
		"(Temperature)",
@@ -102,6 +95,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

	var err error

	c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Thermal Zone Information", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err)
	}

	return nil
}

@@ -23,6 +23,7 @@ import (
	"strings"
	"time"

	"github.com/Microsoft/hcsshim/osversion"
	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
	"github.com/prometheus-community/windows_exporter/internal/mi"
@@ -58,14 +59,17 @@ type Collector struct {
	perfDataCollector *pdh.Collector
	perfDataObject []perfDataCounterValues

	currentTime *prometheus.Desc
	timezone *prometheus.Desc
	clockFrequencyAdjustmentPPBTotal *prometheus.Desc
	computedTimeOffset *prometheus.Desc
	ntpClientTimeSourceCount *prometheus.Desc
	ntpRoundTripDelay *prometheus.Desc
	ntpServerIncomingRequestsTotal *prometheus.Desc
	ntpServerOutgoingResponsesTotal *prometheus.Desc
	ppbCounterPresent bool

	currentTime *prometheus.Desc
	timezone *prometheus.Desc
	clockFrequencyAdjustment *prometheus.Desc
	clockFrequencyAdjustmentPPB *prometheus.Desc
	computedTimeOffset *prometheus.Desc
	ntpClientTimeSourceCount *prometheus.Desc
	ntpRoundTripDelay *prometheus.Desc
	ntpServerIncomingRequestsTotal *prometheus.Desc
	ntpServerOutgoingResponsesTotal *prometheus.Desc
}

func New(config *Config) *Collector {
@@ -125,6 +129,9 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		}
	}

	// https://github.com/prometheus-community/windows_exporter/issues/1891
	c.ppbCounterPresent = osversion.Build() >= osversion.LTSC2019

	c.currentTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "current_timestamp_seconds"),
		"OperatingSystem.LocalDateTime",
@@ -137,9 +144,15 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		[]string{"timezone"},
		nil,
	)
	c.clockFrequencyAdjustmentPPBTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment_ppb_total"),
		"Total adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units.",
	c.clockFrequencyAdjustment = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment"),
		"This value reflects the adjustment made to the local system clock frequency by W32Time in nominal clock units. This counter helps visualize the finer adjustments being made by W32time to synchronize the local clock.",
		nil,
		nil,
	)
	c.clockFrequencyAdjustmentPPB = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment_ppb"),
		"This value reflects the adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second. The smallest possible adjustment can vary and can be expected to be in the order of 100's of PPB. This counter helps visualize the finer actions being taken by W32time to synchronize the local clock.",
		nil,
		nil,
	)
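As the help text says, 1 PPB is 1 nanosecond of adjustment per elapsed second, so a few hundred PPB amounts to tens of milliseconds per day. A quick back-of-the-envelope conversion (the sample value is made up):

```go
package main

import "fmt"

func main() {
	const adjustmentPPB = 500.0 // hypothetical counter sample

	driftPerSecond := adjustmentPPB * 1e-9 // seconds of adjustment per second
	driftPerDay := driftPerSecond * 86400  // seconds of adjustment per day

	fmt.Printf("%.0f PPB ≈ %.4f s/day\n", adjustmentPPB, driftPerDay) // 500 PPB ≈ 0.0432 s/day
}
```
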
@@ -187,7 +200,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	errs := make([]error, 0, 2)
	errs := make([]error, 0)

	if slices.Contains(c.config.CollectorsEnabled, collectorSystemTime) {
		if err := c.collectTime(ch); err != nil {
@@ -232,14 +245,23 @@ func (c *Collector) collectTime(ch chan<- prometheus.Metric) error {
func (c *Collector) collectNTP(ch chan<- prometheus.Metric) error {
	err := c.perfDataCollector.Collect(&c.perfDataObject)
	if err != nil {
		return fmt.Errorf("failed to collect VM Memory metrics: %w", err)
		return fmt.Errorf("failed to collect time metrics: %w", err)
	}

	ch <- prometheus.MustNewConstMetric(
		c.clockFrequencyAdjustmentPPBTotal,
		prometheus.CounterValue,
		c.perfDataObject[0].ClockFrequencyAdjustmentPPBTotal,
		c.clockFrequencyAdjustment,
		prometheus.GaugeValue,
		c.perfDataObject[0].ClockFrequencyAdjustment,
	)

	if c.ppbCounterPresent {
		ch <- prometheus.MustNewConstMetric(
			c.clockFrequencyAdjustmentPPB,
			prometheus.GaugeValue,
			c.perfDataObject[0].ClockFrequencyAdjustmentPPB,
		)
	}

	ch <- prometheus.MustNewConstMetric(
		c.computedTimeOffset,
		prometheus.GaugeValue,

@@ -16,10 +16,11 @@
package time

type perfDataCounterValues struct {
	ClockFrequencyAdjustmentPPBTotal float64 `perfdata:"Clock Frequency Adjustment (ppb)"`
	ComputedTimeOffset float64 `perfdata:"Computed Time Offset"`
	NTPClientTimeSourceCount float64 `perfdata:"NTP Client Time Source Count"`
	NTPRoundTripDelay float64 `perfdata:"NTP Roundtrip Delay"`
	NTPServerIncomingRequestsTotal float64 `perfdata:"NTP Server Incoming Requests"`
	NTPServerOutgoingResponsesTotal float64 `perfdata:"NTP Server Outgoing Responses"`
	ClockFrequencyAdjustment float64 `perfdata:"Clock Frequency Adjustment"`
	ClockFrequencyAdjustmentPPB float64 `perfdata:"Clock Frequency Adjustment (ppb)" perfdata_min_build:"17763"`
	ComputedTimeOffset float64 `perfdata:"Computed Time Offset"`
	NTPClientTimeSourceCount float64 `perfdata:"NTP Client Time Source Count"`
	NTPRoundTripDelay float64 `perfdata:"NTP Roundtrip Delay"`
	NTPServerIncomingRequestsTotal float64 `perfdata:"NTP Server Incoming Requests"`
	NTPServerOutgoingResponsesTotal float64 `perfdata:"NTP Server Outgoing Responses"`
}

@@ -80,18 +80,6 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv4", nil)
	if err != nil {
		return fmt.Errorf("failed to create UDPv4 collector: %w", err)
	}

	c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv6", nil)
	if err != nil {
		return fmt.Errorf("failed to create UDPv6 collector: %w", err)
	}

	c.datagramsNoPortTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "datagram_no_port_total"),
		"Number of received UDP datagrams for which there was no application at the destination port",
@@ -117,6 +105,18 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
		nil,
	)

	var err error

	c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv4", nil)
	if err != nil {
		return fmt.Errorf("failed to create UDPv4 collector: %w", err)
	}

	c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv6", nil)
	if err != nil {
		return fmt.Errorf("failed to create UDPv6 collector: %w", err)
	}

	return nil
}

@@ -230,7 +230,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
	errs := make([]error, 0, 2)
	errs := make([]error, 0)

	if err := c.collectCpu(ch); err != nil {
		errs = append(errs, fmt.Errorf("failed collecting vmware cpu metrics: %w", err))

@@ -38,8 +38,52 @@ type Resolver struct {
	flags map[string]string
}

// NewResolver returns a Resolver structure.
func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecureSkipVerify bool) (*Resolver, error) {
// Parse parses the command line arguments and configuration files.
func Parse(app *kingpin.Application, args []string) error {
	configFile := ParseConfigFile(args)
	if configFile != "" {
		resolver, err := NewConfigFileResolver(configFile)
		if err != nil {
			return fmt.Errorf("failed to load configuration file: %w", err)
		}

		if err = resolver.Bind(app, args); err != nil {
			return fmt.Errorf("failed to bind configuration: %w", err)
		}
	}

	if _, err := app.Parse(args); err != nil {
		return fmt.Errorf("failed to parse flags: %w", err)
	}

	return nil
}

// ParseConfigFile manually parses the configuration file path from the command line arguments.
func ParseConfigFile(args []string) string {
	for i, cliFlag := range args {
		if strings.HasPrefix(cliFlag, "--config.file=") {
			return strings.TrimPrefix(cliFlag, "--config.file=")
		}

		if strings.HasPrefix(cliFlag, "-config.file=") {
			return strings.TrimPrefix(cliFlag, "-config.file=")
		}

		if strings.HasSuffix(cliFlag, "-config.file") {
			if len(args) <= i+1 {
				return ""
			}

			return args[i+1]
		}
	}

	return ""
}

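A sketch of how a caller inside the same module might wire the new Parse entry point (the import path and the second flag are assumptions for illustration): values from --config.file are bound onto the kingpin app first, so flags passed explicitly on the command line still win when app.Parse runs.

```go
package main

import (
	"log"
	"os"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/internal/config"
)

func main() {
	app := kingpin.New("windows_exporter", "A Prometheus exporter for Windows machines.")
	app.Flag("config.file", "Path to a configuration file.").String()
	listenAddr := app.Flag("web.listen-address", "Listen address.").Default(":9182").String() // illustrative flag

	// Resolves --config.file, binds its values as flag defaults, then
	// parses the command line, letting explicit flags take precedence.
	if err := config.Parse(app, os.Args[1:]); err != nil {
		log.Fatal(err)
	}

	log.Println("listening on", *listenAddr)
}
```
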
// NewConfigFileResolver returns a Resolver structure.
func NewConfigFileResolver(file string) (*Resolver, error) {
	flags := map[string]string{}

	var (
@@ -48,14 +92,15 @@ func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecure
	)

	if strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://") {
		logger.WarnContext(ctx, "Loading configuration file from URL is deprecated and will be removed in 0.31.0. Use a local file instead.")
		//nolint:sloglint // we do not have a logger yet
		slog.Warn("Loading configuration file from URL is deprecated and will be removed in 0.31.0. Use a local file instead.")

		fileBytes, err = readFromURL(ctx, file, logger, insecureSkipVerify)
		fileBytes, err = readFromURL(file)
		if err != nil {
			return nil, err
		}
	} else {
		fileBytes, err = readFromFile(ctx, file, logger)
		fileBytes, err = readFromFile(file)
		if err != nil {
			return nil, err
		}
@@ -79,9 +124,7 @@ func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecure
	return &Resolver{flags: flags}, nil
}

func readFromFile(ctx context.Context, file string, logger *slog.Logger) ([]byte, error) {
	logger.InfoContext(ctx, "loading configuration file: "+file)

func readFromFile(file string) ([]byte, error) {
	if _, err := os.Stat(file); err != nil {
		return nil, fmt.Errorf("failed to read configuration file: %w", err)
	}
@@ -94,20 +137,14 @@ func readFromFile(ctx context.Context, file string, logger *slog.Logger) ([]byte
	return fileBytes, nil
}

func readFromURL(ctx context.Context, file string, logger *slog.Logger, insecureSkipVerify bool) ([]byte, error) {
	logger.InfoContext(ctx, "loading configuration file from URL: "+file)

func readFromURL(file string) ([]byte, error) {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify}, //nolint:gosec
	}

	if insecureSkipVerify {
		logger.WarnContext(ctx, "Loading configuration file with TLS verification disabled")
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec
	}

	client := &http.Client{Transport: tr}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, file, nil)
	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, file, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create HTTP request: %w", err)
	}

@@ -23,10 +23,11 @@ import (

//nolint:gochecknoglobals
var (
	kernel32 = windows.NewLazySystemDLL("kernel32.dll")
	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")

	procGetDynamicTimeZoneInformationSys = kernel32.NewProc("GetDynamicTimeZoneInformation")
	kernelLocalFileTimeToFileTime = kernel32.NewProc("LocalFileTimeToFileTime")
	procGetDynamicTimeZoneInformationSys = modkernel32.NewProc("GetDynamicTimeZoneInformation")
	procKernelLocalFileTimeToFileTime = modkernel32.NewProc("LocalFileTimeToFileTime")
	procGetTickCount = modkernel32.NewProc("GetTickCount64")
)

// SYSTEMTIME contains a date and time.
@@ -70,9 +71,15 @@ func GetDynamicTimeZoneInformation() (DynamicTimezoneInformation, error) {
}

func LocalFileTimeToFileTime(localFileTime, utcFileTime *windows.Filetime) uint32 {
	ret, _, _ := kernelLocalFileTimeToFileTime.Call(
	ret, _, _ := procKernelLocalFileTimeToFileTime.Call(
		uintptr(unsafe.Pointer(localFileTime)),
		uintptr(unsafe.Pointer(utcFileTime)))

	return uint32(ret)
}

func GetTickCount64() uint64 {
	ret, _, _ := procGetTickCount.Call()

	return uint64(ret)
}

@@ -20,10 +20,12 @@ import (
	"fmt"
	"reflect"
	"slices"
	"strconv"
	"strings"
	"sync"
	"unsafe"

	"github.com/Microsoft/hcsshim/osversion"
	"github.com/prometheus-community/windows_exporter/internal/mi"
	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/sys/windows"
@@ -120,6 +122,11 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances
			continue
		}

		secondValue := strings.HasSuffix(counterName, ",secondvalue")
		if secondValue {
			counterName = strings.TrimSuffix(counterName, ",secondvalue")
		}

		var counter Counter
		if counter, ok = collector.counters[counterName]; !ok {
			counter = Counter{
@@ -130,9 +137,7 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances
			}
		}

		if strings.HasSuffix(counterName, ",secondvalue") {
			counterName = strings.TrimSuffix(counterName, ",secondvalue")

		if secondValue {
			counter.FieldIndexSecondValue = f.Index[0]
		} else {
			counter.FieldIndexValue = f.Index[0]
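For reference, the hoisted `,secondvalue` suffix lets one PDH counter populate two struct fields: the primary raw value and the counter's second raw value (the denominator of ratio-type counters). An illustrative declaration, with invented field and counter names:

```go
// Illustrative only: both fields name the same counter, so they share
// one Counter entry; the ",secondvalue" field receives the second raw
// value PDH returns for ratio-type counters.
type perfDataCounterValues struct {
	PercentProcessorTime            float64 `perfdata:"% Processor Time"`
	PercentProcessorTimeSecondValue float64 `perfdata:"% Processor Time,secondvalue"`
}
```
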
@@ -151,7 +156,18 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances

		var counterHandle pdhCounterHandle

		//nolint:nestif
		if ret := AddEnglishCounter(handle, counterPath, 0, &counterHandle); ret != ErrorSuccess {
			if ret == CstatusNoCounter {
				if minOSBuildTag, ok := f.Tag.Lookup("perfdata_min_build"); ok {
					if minOSBuild, err := strconv.Atoi(minOSBuildTag); err == nil {
						if uint16(minOSBuild) > osversion.Build() {
							continue
						}
					}
				}
			}

			errs = append(errs, fmt.Errorf("failed to add counter %s: %w", counterPath, NewPdhError(ret)))

			continue
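This is the mechanism behind the `perfdata_min_build` tag seen in the time collector's struct above: when AddEnglishCounter reports CstatusNoCounter and the running build is older than the tag's minimum, the field is skipped silently instead of surfacing an error. The declaration side looks like this (copied from the time collector):

```go
type perfDataCounterValues struct {
	ClockFrequencyAdjustment float64 `perfdata:"Clock Frequency Adjustment"`
	// The (ppb) counter only exists from Windows build 17763 (LTSC2019)
	// onward; older hosts skip it instead of failing Build.
	ClockFrequencyAdjustmentPPB float64 `perfdata:"Clock Frequency Adjustment (ppb)" perfdata_min_build:"17763"`
}
```
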
@@ -164,7 +180,7 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances
		}

		// Get the info with the current buffer size
		bufLen := uint32(0)
		var bufLen uint32

		if ret := GetCounterInfo(counterHandle, 0, &bufLen, nil); ret != MoreData {
			errs = append(errs, fmt.Errorf("GetCounterInfo: %w", NewPdhError(ret)))
@@ -172,24 +188,27 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances
			continue
		}

		if bufLen == 0 {
		buf := make([]byte, bufLen)
		if len(buf) == 0 {
			errs = append(errs, errors.New("GetCounterInfo: buffer length is zero"))

			continue
		}

		buf := make([]byte, bufLen)
		if ret := GetCounterInfo(counterHandle, 0, &bufLen, &buf[0]); ret != ErrorSuccess {
			errs = append(errs, fmt.Errorf("GetCounterInfo: %w", NewPdhError(ret)))

			continue
		}

		ci := (*CounterInfo)(unsafe.Pointer(&buf[0]))
		counter.Type = ci.DwType
		counter.Desc = windows.UTF16PtrToString(ci.SzExplainText)
		counter.Desc = windows.UTF16PtrToString(ci.SzExplainText)
		counterInfo := (*CounterInfo)(unsafe.Pointer(&buf[0]))
		if counterInfo == nil {
			errs = append(errs, errors.New("GetCounterInfo: counter info is nil"))

			continue
		}

		counter.Type = counterInfo.DwType
		if val, ok := SupportedCounterTypes[counter.Type]; ok {
			counter.MetricType = val
		} else {

@@ -18,4 +18,5 @@ import "errors"
var (
	ErrCollectorNotInitialized = errors.New("collector not initialized")
	ErrNoData = errors.New("no data")
	ErrNoDataUnexpected = errors.New("no data")
)

@@ -31,6 +31,7 @@ func NewCounter(lastValue uint32) Counter {

func (c *Counter) AddValue(value uint32) {
	c.totalValue += float64(value - c.lastValue)
	c.lastValue = value
}

func (c *Counter) Value() float64 {

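AddValue leans on unsigned wraparound: `value - c.lastValue` is evaluated modulo 2^32, so the delta stays correct even when the raw counter overflows, which is exactly what the rewritten test below exercises. A tiny demonstration:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// The raw counter overflowed: it went from MaxUint32-10 up to 20,
	// i.e. 31 increments. Unsigned subtraction wraps modulo 2^32 and
	// still yields the correct delta.
	last := uint32(math.MaxUint32 - 10)
	now := uint32(20)

	fmt.Println(now - last) // 31
}
```
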
@@ -20,20 +20,36 @@ import (
	"testing"

	"github.com/prometheus-community/windows_exporter/internal/utils"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCounter(t *testing.T) {
	t.Parallel()

	c := utils.NewCounter(0)
	assert.Equal(t, 0.0, c.Value()) //nolint:testifylint
	require.Equal(t, 0.0, c.Value()) //nolint:testifylint

	c.AddValue(1)
	c.AddValue(10)

	assert.Equal(t, 1.0, c.Value()) //nolint:testifylint
	require.Equal(t, 10.0, c.Value()) //nolint:testifylint

	c.AddValue(math.MaxUint32)
	c.AddValue(50)

	assert.Equal(t, float64(math.MaxUint32)+1.0, c.Value()) //nolint:testifylint
	require.Equal(t, 50.0, c.Value()) //nolint:testifylint

	c.AddValue(math.MaxUint32 - 10)

	require.Equal(t, float64(math.MaxUint32)-10, c.Value()) //nolint:testifylint

	c.AddValue(20)

	require.Equal(t, float64(math.MaxUint32)+21, c.Value()) //nolint:testifylint

	c.AddValue(40)

	require.Equal(t, float64(math.MaxUint32)+41, c.Value()) //nolint:testifylint

	c.AddValue(math.MaxUint32 - 10)

	require.Equal(t, float64(math.MaxUint32)*2-9, c.Value()) //nolint:testifylint
}

@@ -16,6 +16,7 @@
package testutils

import (
	"context"
	"errors"
	"io"
	"log/slog"
@@ -47,7 +48,7 @@ func FuncBenchmarkCollector[C collector.Collector](b *testing.B, name string, co
	}

	collectors := collector.New(map[string]collector.Collector{name: c})
	require.NoError(b, collectors.Build(logger))
	require.NoError(b, collectors.Build(context.Background(), logger))

	metrics := make(chan prometheus.Metric)

@@ -201,7 +201,7 @@ func (c *Collection) Enable(enabledCollectors []string) error {
// Build is to be called by the exporter for collector initialization.
// Instead of failing fast, it will try to build all collectors and return all errors.
// Errors are joined with errors.Join.
func (c *Collection) Build(logger *slog.Logger) error {
func (c *Collection) Build(ctx context.Context, logger *slog.Logger) error {
	c.startTime = gotime.Now()

	err := c.initMI()
@@ -236,7 +236,7 @@ func (c *Collection) Build(logger *slog.Logger) error {
		errors.Is(err, pdh.NewPdhError(pdh.CstatusNoObject)) ||
		errors.Is(err, pdh.NewPdhError(pdh.CstatusNoCounter)) ||
		errors.Is(err, mi.MI_RESULT_INVALID_NAMESPACE) {
		logger.LogAttrs(context.Background(), slog.LevelWarn, "couldn't initialize collector", slog.Any("err", err))
		logger.LogAttrs(ctx, slog.LevelWarn, "couldn't initialize collector", slog.Any("err", err))

		continue
	}

@@ -193,7 +193,7 @@ windows_exporter_collector_timeout{collector="udp"} 0
# TYPE windows_logical_disk_write_seconds_total counter
# HELP windows_logical_disk_writes_total The number of write operations on the disk (LogicalDisk.DiskWritesPerSec)
# TYPE windows_logical_disk_writes_total counter
# HELP windows_logon_session_logon_timestamp_seconds timestamp of the logon session in seconds.
# HELP windows_logon_session_logon_timestamp_seconds Deprecated. Use windows_terminal_services_session_info instead.
# TYPE windows_logon_session_logon_timestamp_seconds gauge
# HELP windows_memory_available_bytes The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to the standby (cached), free and zero page lists (AvailableBytes)
# TYPE windows_memory_available_bytes gauge
@@ -307,7 +307,7 @@ windows_exporter_collector_timeout{collector="udp"} 0
# TYPE windows_os_time gauge
# HELP windows_os_timezone Deprecated: Use windows_time_timezone instead.
# TYPE windows_os_timezone gauge
# HELP windows_os_users Deprecated: Use `count(windows_logon_logon_type)` instead.
# HELP windows_os_users Deprecated: Use `sum(windows_terminal_services_session_info{state="active"})` instead.
# TYPE windows_os_users gauge
# HELP windows_os_virtual_memory_bytes Deprecated: Use `windows_memory_commit_limit` instead.
# TYPE windows_os_virtual_memory_bytes gauge
@@ -389,7 +389,9 @@ windows_service_state{name="Themes",state="running"} 1
windows_service_state{name="Themes",state="start pending"} 0
windows_service_state{name="Themes",state="stop pending"} 0
windows_service_state{name="Themes",state="stopped"} 0
# HELP windows_system_boot_time_timestamp_seconds Unix timestamp of system boot time
# HELP windows_system_boot_time_timestamp Unix timestamp of system boot time
# TYPE windows_system_boot_time_timestamp gauge
# HELP windows_system_boot_time_timestamp_seconds Deprecated: Use windows_system_boot_time_timestamp instead
# TYPE windows_system_boot_time_timestamp_seconds gauge
# HELP windows_system_context_switches_total Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)
# TYPE windows_system_context_switches_total counter
@@ -427,8 +429,10 @@ windows_service_state{name="Themes",state="stopped"} 0
# TYPE windows_tcp_segments_total counter
# HELP windows_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE windows_textfile_mtime_seconds gauge
# HELP windows_time_clock_frequency_adjustment_ppb_total Total adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units.
# TYPE windows_time_clock_frequency_adjustment_ppb_total counter
# HELP windows_time_clock_frequency_adjustment This value reflects the adjustment made to the local system clock frequency by W32Time in nominal clock units. This counter helps visualize the finer adjustments being made by W32time to synchronize the local clock.
# TYPE windows_time_clock_frequency_adjustment gauge
# HELP windows_time_clock_frequency_adjustment_ppb This value reflects the adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second. The smallest possible adjustment can vary and can be expected to be in the order of 100's of PPB. This counter helps visualize the finer actions being taken by W32time to synchronize the local clock.
# TYPE windows_time_clock_frequency_adjustment_ppb gauge
# HELP windows_time_computed_time_offset_seconds Absolute time offset between the system clock and the chosen time source, in seconds
# TYPE windows_time_computed_time_offset_seconds gauge
# HELP windows_time_current_timestamp_seconds OperatingSystem.LocalDateTime