From 6f0209ddb7a2cf4188dc0d9d36731bd0a073679a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Wed, 12 Mar 2025 08:28:30 +0100 Subject: [PATCH 01/14] time: `windows_time_clock_frequency_adjustment_ppb_total` -> `windows_time_clock_frequency_adjustment_ppb` and add `windows_time_clock_frequency_adjustment` metric for Win2016 (#1910) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke Signed-off-by: Jan-Otto Kröpke (cherry picked from commit d6196c5c6b2637a80fe67ddf13a9a80b6b0e1f4b) --- .editorconfig | 2 +- docs/collector.time.md | 21 +++++++------ internal/collector/time/time.go | 52 +++++++++++++++++++++++--------- internal/collector/time/types.go | 13 ++++---- internal/pdh/collector.go | 13 ++++++++ tools/e2e-output.txt | 6 ++-- 6 files changed, 73 insertions(+), 34 deletions(-) diff --git a/.editorconfig b/.editorconfig index 688f6373..13e98bb9 100644 --- a/.editorconfig +++ b/.editorconfig @@ -13,4 +13,4 @@ indent_size = 4 [*.{yml,yaml}] indent_style = space -indent_size = 2 \ No newline at end of file +indent_size = 2 diff --git a/docs/collector.time.md b/docs/collector.time.md index 37b8c889..da1873da 100644 --- a/docs/collector.time.md +++ b/docs/collector.time.md @@ -21,16 +21,17 @@ Matching is case-sensitive. ## Metrics -| Name | Description | Type | Labels | -|-----------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------| -| `windows_time_clock_frequency_adjustment_ppb_total` | Total adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | counter | None | -| `windows_time_computed_time_offset_seconds` | The absolute time offset between the system clock and the chosen time source, as computed by the W32Time service in microseconds. When a new valid sample is available, the computed time is updated with the time offset indicated by the sample. This time is the actual time offset of the local clock. W32Time initiates clock correction by using this offset and updates the computed time in between samples with the remaining time offset that needs to be applied to the local clock. Clock accuracy can be tracked by using this performance counter with a low polling interval (for example, 256 seconds or less) and looking for the counter value to be smaller than the desired clock accuracy limit. | gauge | None | -| `windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. 
| gauge | None | -| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None | -| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None | -| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None | -| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None | -| `windows_time_timezone` | Current timezone as reported by the operating system. | gauge | `timezone` | +| Name | Description | Type | Labels | +|----------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------| +| `windows_time_clock_frequency_adjustment` | Adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | gauge | None | +| `windows_time_clock_frequency_adjustment_ppb` | Adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | gauge | None | +| `windows_time_computed_time_offset_seconds` | The absolute time offset between the system clock and the chosen time source, as computed by the W32Time service in microseconds. When a new valid sample is available, the computed time is updated with the time offset indicated by the sample. This time is the actual time offset of the local clock. W32Time initiates clock correction by using this offset and updates the computed time in between samples with the remaining time offset that needs to be applied to the local clock. Clock accuracy can be tracked by using this performance counter with a low polling interval (for example, 256 seconds or less) and looking for the counter value to be smaller than the desired clock accuracy limit. | gauge | None | +| `windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. 
| gauge | None | +| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None | +| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None | +| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None | +| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None | +| `windows_time_timezone` | Current timezone as reported by the operating system. | gauge | `timezone` | ### Example metric _This collector does not yet have explained examples, we would appreciate your help adding them!_ diff --git a/internal/collector/time/time.go b/internal/collector/time/time.go index b60763d0..6c8273f0 100644 --- a/internal/collector/time/time.go +++ b/internal/collector/time/time.go @@ -23,6 +23,7 @@ import ( "strings" "time" + "github.com/Microsoft/hcsshim/osversion" "github.com/alecthomas/kingpin/v2" "github.com/prometheus-community/windows_exporter/internal/headers/kernel32" "github.com/prometheus-community/windows_exporter/internal/mi" @@ -58,14 +59,17 @@ type Collector struct { perfDataCollector *pdh.Collector perfDataObject []perfDataCounterValues - currentTime *prometheus.Desc - timezone *prometheus.Desc - clockFrequencyAdjustmentPPBTotal *prometheus.Desc - computedTimeOffset *prometheus.Desc - ntpClientTimeSourceCount *prometheus.Desc - ntpRoundTripDelay *prometheus.Desc - ntpServerIncomingRequestsTotal *prometheus.Desc - ntpServerOutgoingResponsesTotal *prometheus.Desc + ppbCounterPresent bool + + currentTime *prometheus.Desc + timezone *prometheus.Desc + clockFrequencyAdjustment *prometheus.Desc + clockFrequencyAdjustmentPPB *prometheus.Desc + computedTimeOffset *prometheus.Desc + ntpClientTimeSourceCount *prometheus.Desc + ntpRoundTripDelay *prometheus.Desc + ntpServerIncomingRequestsTotal *prometheus.Desc + ntpServerOutgoingResponsesTotal *prometheus.Desc } func New(config *Config) *Collector { @@ -125,6 +129,9 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { } } + // https://github.com/prometheus-community/windows_exporter/issues/1891 + c.ppbCounterPresent = osversion.Build() >= osversion.LTSC2019 + c.currentTime = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "current_timestamp_seconds"), "OperatingSystem.LocalDateTime", @@ -137,9 +144,15 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { []string{"timezone"}, nil, ) - c.clockFrequencyAdjustmentPPBTotal = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment_ppb_total"), - "Total adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units.", + c.clockFrequencyAdjustment = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment"), + "This value reflects the adjustment made to the local system clock frequency by W32Time in nominal clock units. 
This counter helps visualize the finer adjustments being made by W32time to synchronize the local clock.", + nil, + nil, + ) + c.clockFrequencyAdjustmentPPB = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment_ppb"), + "This value reflects the adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units. 1 PPB adjustment imples the system clock was adjusted at a rate of 1 nanosecond per second. The smallest possible adjustment can vary and can be expected to be in the order of 100's of PPB. This counter helps visualize the finer actions being taken by W32time to synchronize the local clock.", nil, nil, ) @@ -232,14 +245,23 @@ func (c *Collector) collectTime(ch chan<- prometheus.Metric) error { func (c *Collector) collectNTP(ch chan<- prometheus.Metric) error { err := c.perfDataCollector.Collect(&c.perfDataObject) if err != nil { - return fmt.Errorf("failed to collect VM Memory metrics: %w", err) + return fmt.Errorf("failed to collect time metrics: %w", err) } ch <- prometheus.MustNewConstMetric( - c.clockFrequencyAdjustmentPPBTotal, - prometheus.CounterValue, - c.perfDataObject[0].ClockFrequencyAdjustmentPPBTotal, + c.clockFrequencyAdjustment, + prometheus.GaugeValue, + c.perfDataObject[0].ClockFrequencyAdjustment, ) + + if c.ppbCounterPresent { + ch <- prometheus.MustNewConstMetric( + c.clockFrequencyAdjustmentPPB, + prometheus.GaugeValue, + c.perfDataObject[0].ClockFrequencyAdjustmentPPB, + ) + } + ch <- prometheus.MustNewConstMetric( c.computedTimeOffset, prometheus.GaugeValue, diff --git a/internal/collector/time/types.go b/internal/collector/time/types.go index 3bed16b7..edd548bd 100644 --- a/internal/collector/time/types.go +++ b/internal/collector/time/types.go @@ -16,10 +16,11 @@ package time type perfDataCounterValues struct { - ClockFrequencyAdjustmentPPBTotal float64 `perfdata:"Clock Frequency Adjustment (ppb)"` - ComputedTimeOffset float64 `perfdata:"Computed Time Offset"` - NTPClientTimeSourceCount float64 `perfdata:"NTP Client Time Source Count"` - NTPRoundTripDelay float64 `perfdata:"NTP Roundtrip Delay"` - NTPServerIncomingRequestsTotal float64 `perfdata:"NTP Server Incoming Requests"` - NTPServerOutgoingResponsesTotal float64 `perfdata:"NTP Server Outgoing Responses"` + ClockFrequencyAdjustment float64 `perfdata:"Clock Frequency Adjustment"` + ClockFrequencyAdjustmentPPB float64 `perfdata:"Clock Frequency Adjustment (ppb)" perfdata_min_build:"17763"` + ComputedTimeOffset float64 `perfdata:"Computed Time Offset"` + NTPClientTimeSourceCount float64 `perfdata:"NTP Client Time Source Count"` + NTPRoundTripDelay float64 `perfdata:"NTP Roundtrip Delay"` + NTPServerIncomingRequestsTotal float64 `perfdata:"NTP Server Incoming Requests"` + NTPServerOutgoingResponsesTotal float64 `perfdata:"NTP Server Outgoing Responses"` } diff --git a/internal/pdh/collector.go b/internal/pdh/collector.go index db8ab149..260ae988 100644 --- a/internal/pdh/collector.go +++ b/internal/pdh/collector.go @@ -20,10 +20,12 @@ import ( "fmt" "reflect" "slices" + "strconv" "strings" "sync" "unsafe" + "github.com/Microsoft/hcsshim/osversion" "github.com/prometheus-community/windows_exporter/internal/mi" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/windows" @@ -151,7 +153,18 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances var counterHandle pdhCounterHandle + //nolint:nestif if ret := AddEnglishCounter(handle, counterPath, 0, &counterHandle); ret != ErrorSuccess { + if ret 
== CstatusNoCounter { + if minOSBuildTag, ok := f.Tag.Lookup("perfdata_min_build"); ok { + if minOSBuild, err := strconv.Atoi(minOSBuildTag); err == nil { + if uint16(minOSBuild) > osversion.Build() { + continue + } + } + } + } + errs = append(errs, fmt.Errorf("failed to add counter %s: %w", counterPath, NewPdhError(ret))) continue diff --git a/tools/e2e-output.txt b/tools/e2e-output.txt index 30998eae..a5dcfec0 100644 --- a/tools/e2e-output.txt +++ b/tools/e2e-output.txt @@ -427,8 +427,10 @@ windows_service_state{name="Themes",state="stopped"} 0 # TYPE windows_tcp_segments_total counter # HELP windows_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE windows_textfile_mtime_seconds gauge -# HELP windows_time_clock_frequency_adjustment_ppb_total Total adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units. -# TYPE windows_time_clock_frequency_adjustment_ppb_total counter +# HELP windows_time_clock_frequency_adjustment This value reflects the adjustment made to the local system clock frequency by W32Time in nominal clock units. This counter helps visualize the finer adjustments being made by W32time to synchronize the local clock. +# TYPE windows_time_clock_frequency_adjustment gauge +# HELP windows_time_clock_frequency_adjustment_ppb This value reflects the adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units. 1 PPB adjustment imples the system clock was adjusted at a rate of 1 nanosecond per second. The smallest possible adjustment can vary and can be expected to be in the order of 100's of PPB. This counter helps visualize the finer actions being taken by W32time to synchronize the local clock. +# TYPE windows_time_clock_frequency_adjustment_ppb gauge # HELP windows_time_computed_time_offset_seconds Absolute time offset between the system clock and the chosen time source, in seconds # TYPE windows_time_computed_time_offset_seconds gauge # HELP windows_time_current_timestamp_seconds OperatingSystem.LocalDateTime From c300935170fe9a1905689add92bb97b3eaa4f4f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Sat, 5 Apr 2025 22:20:07 +0200 Subject: [PATCH 02/14] fix: windows_cpu_processor_utility_total is always 0 (#1966) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke (cherry picked from commit 9db4318ea9f15f62c82e055b7d44f631d874fdd3) Signed-off-by: Jan-Otto Kröpke --- .github/workflows/pr-check.yaml | 2 +- internal/pdh/collector.go | 20 +++++++++++++------- internal/utils/counter.go | 1 + internal/utils/counter_test.go | 28 ++++++++++++++++++++++------ 4 files changed, 37 insertions(+), 14 deletions(-) diff --git a/.github/workflows/pr-check.yaml b/.github/workflows/pr-check.yaml index 9fd20a28..23a037b2 100644 --- a/.github/workflows/pr-check.yaml +++ b/.github/workflows/pr-check.yaml @@ -37,7 +37,7 @@ jobs: - name: check run: | PR_TITLE_PREFIX=$(echo "$PR_TITLE" | cut -d':' -f1) - if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ 
"$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then + if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "fix(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Release"* ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then exit 0 fi diff --git a/internal/pdh/collector.go b/internal/pdh/collector.go index 260ae988..43b9d33e 100644 --- a/internal/pdh/collector.go +++ b/internal/pdh/collector.go @@ -122,6 +122,11 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances continue } + secondValue := strings.HasSuffix(counterName, ",secondvalue") + if secondValue { + counterName = strings.TrimSuffix(counterName, ",secondvalue") + } + var counter Counter if counter, ok = collector.counters[counterName]; !ok { counter = Counter{ @@ -132,9 +137,7 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances } } - if strings.HasSuffix(counterName, ",secondvalue") { - counterName = strings.TrimSuffix(counterName, ",secondvalue") - + if secondValue { counter.FieldIndexSecondValue = f.Index[0] } else { counter.FieldIndexValue = f.Index[0] @@ -198,11 +201,14 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances continue } - ci := (*CounterInfo)(unsafe.Pointer(&buf[0])) - counter.Type = ci.DwType - counter.Desc = windows.UTF16PtrToString(ci.SzExplainText) - counter.Desc = windows.UTF16PtrToString(ci.SzExplainText) + counterInfo := (*CounterInfo)(unsafe.Pointer(&buf[0])) + if counterInfo == nil { + errs = append(errs, errors.New("GetCounterInfo: counter info is nil")) + continue + } + + counter.Type = counterInfo.DwType if val, ok := SupportedCounterTypes[counter.Type]; ok { counter.MetricType = val } else { diff --git a/internal/utils/counter.go b/internal/utils/counter.go index bf280896..f4147c94 100644 --- a/internal/utils/counter.go +++ b/internal/utils/counter.go @@ -31,6 +31,7 @@ func NewCounter(lastValue uint32) Counter { func (c *Counter) AddValue(value uint32) { c.totalValue += float64(value - c.lastValue) + c.lastValue = value } func (c *Counter) Value() float64 { diff --git a/internal/utils/counter_test.go b/internal/utils/counter_test.go index cb55fcb4..12b3b4eb 100644 --- a/internal/utils/counter_test.go +++ b/internal/utils/counter_test.go @@ -20,20 +20,36 @@ import ( "testing" "github.com/prometheus-community/windows_exporter/internal/utils" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCounter(t *testing.T) { t.Parallel() c := utils.NewCounter(0) - assert.Equal(t, 0.0, c.Value()) //nolint:testifylint + require.Equal(t, 0.0, c.Value()) //nolint:testifylint - c.AddValue(1) + c.AddValue(10) - assert.Equal(t, 1.0, c.Value()) //nolint:testifylint + require.Equal(t, 10.0, c.Value()) //nolint:testifylint - c.AddValue(math.MaxUint32) + c.AddValue(50) - assert.Equal(t, float64(math.MaxUint32)+1.0, c.Value()) //nolint:testifylint + require.Equal(t, 50.0, c.Value()) //nolint:testifylint + + c.AddValue(math.MaxUint32 - 10) + + require.Equal(t, 
float64(math.MaxUint32)-10, c.Value()) //nolint:testifylint + + c.AddValue(20) + + require.Equal(t, float64(math.MaxUint32)+21, c.Value()) //nolint:testifylint + + c.AddValue(40) + + require.Equal(t, float64(math.MaxUint32)+41, c.Value()) //nolint:testifylint + + c.AddValue(math.MaxUint32 - 10) + + require.Equal(t, float64(math.MaxUint32)*2-9, c.Value()) //nolint:testifylint } From 9da6e56fcf91ee84a4ab61b91c30b4aed8e371af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Fri, 14 Mar 2025 10:57:47 +0100 Subject: [PATCH 03/14] fix: buffer length panic (#1936) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke (cherry picked from commit eecc6ce574a398cb93d730b5eb2ee0819946a469) --- internal/pdh/collector.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/pdh/collector.go b/internal/pdh/collector.go index 43b9d33e..9bd03133 100644 --- a/internal/pdh/collector.go +++ b/internal/pdh/collector.go @@ -180,7 +180,7 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances } // Get the info with the current buffer size - bufLen := uint32(0) + var bufLen uint32 if ret := GetCounterInfo(counterHandle, 0, &bufLen, nil); ret != MoreData { errs = append(errs, fmt.Errorf("GetCounterInfo: %w", NewPdhError(ret))) @@ -188,13 +188,13 @@ func NewCollectorWithReflection(resultType CounterType, object string, instances continue } - if bufLen == 0 { + buf := make([]byte, bufLen) + if len(buf) == 0 { errs = append(errs, errors.New("GetCounterInfo: buffer length is zero")) continue } - buf := make([]byte, bufLen) if ret := GetCounterInfo(counterHandle, 0, &bufLen, &buf[0]); ret != ErrorSuccess { errs = append(errs, fmt.Errorf("GetCounterInfo: %w", NewPdhError(ret))) From 3180315cff51af549a814946b51b45a0767a40e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Fri, 14 Mar 2025 19:28:48 +0100 Subject: [PATCH 04/14] hyperv: fix Windows Server 2016 compatibility (#1925) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke (cherry picked from commit bc1b40c6796f84aca4a54b199eefd13862565198) --- internal/collector/hyperv/hyperv.go | 14 +++++++++----- .../hyperv/hyperv_dynamic_memory_balancer.go | 17 ++++++++++------- .../hyperv/hyperv_dynamic_memory_vm.go | 17 ++++++++++------- .../hyperv_hypervisor_virtual_processor.go | 10 +++++----- 4 files changed, 34 insertions(+), 24 deletions(-) diff --git a/internal/collector/hyperv/hyperv.go b/internal/collector/hyperv/hyperv.go index ea4aeb8c..eb52d0e0 100644 --- a/internal/collector/hyperv/hyperv.go +++ b/internal/collector/hyperv/hyperv.go @@ -148,7 +148,7 @@ func (c *Collector) Close() error { return nil } -func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { +func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { c.collectorFns = make([]func(ch chan<- prometheus.Metric) error, 0, len(c.config.CollectorsEnabled)) c.closeFns = make([]func(), 0, len(c.config.CollectorsEnabled)) @@ -224,9 +224,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { close: c.perfDataCollectorVirtualNetworkAdapterDropReasons.Close, }, subCollectorVirtualSMB: { - build: c.buildVirtualSMB, - collect: c.collectVirtualSMB, - close: c.perfDataCollectorVirtualSMB.Close, + build: c.buildVirtualSMB, + collect: c.collectVirtualSMB, + close: c.perfDataCollectorVirtualSMB.Close, + minBuildNumber: osversion.LTSC2022, }, 
subCollectorVirtualStorageDevice: { build: c.buildVirtualStorageDevice, @@ -253,7 +254,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { } if buildNumber < subCollectors[name].minBuildNumber { - errs = append(errs, fmt.Errorf("collector %s requires Windows Server 2022 or newer", name)) + logger.Warn(fmt.Sprintf( + "collector %s requires windows build version %d. Current build version: %d", + name, subCollectors[name].minBuildNumber, buildNumber, + ), slog.String("collector", name)) continue } diff --git a/internal/collector/hyperv/hyperv_dynamic_memory_balancer.go b/internal/collector/hyperv/hyperv_dynamic_memory_balancer.go index 97a26b38..6bf49b32 100644 --- a/internal/collector/hyperv/hyperv_dynamic_memory_balancer.go +++ b/internal/collector/hyperv/hyperv_dynamic_memory_balancer.go @@ -18,6 +18,7 @@ package hyperv import ( "fmt" + "github.com/Microsoft/hcsshim/osversion" "github.com/prometheus-community/windows_exporter/internal/pdh" "github.com/prometheus-community/windows_exporter/internal/types" "github.com/prometheus-community/windows_exporter/internal/utils" @@ -40,7 +41,7 @@ type perfDataCounterValuesDynamicMemoryBalancer struct { // Hyper-V Dynamic Memory Balancer metrics VmDynamicMemoryBalancerAvailableMemory float64 `perfdata:"Available Memory"` - VmDynamicMemoryBalancerAvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing"` + VmDynamicMemoryBalancerAvailableMemoryForBalancing float64 `perfdata:"Available Memory For Balancing" perfdata_min_build:"17763"` VmDynamicMemoryBalancerAveragePressure float64 `perfdata:"Average Pressure"` VmDynamicMemoryBalancerSystemCurrentPressure float64 `perfdata:"System Current Pressure"` } @@ -96,12 +97,14 @@ func (c *Collector) collectDynamicMemoryBalancer(ch chan<- prometheus.Metric) er data.Name, ) - ch <- prometheus.MustNewConstMetric( - c.vmDynamicMemoryBalancerAvailableMemoryForBalancing, - prometheus.GaugeValue, - utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemoryForBalancing), - data.Name, - ) + if osversion.Build() >= osversion.LTSC2019 { + ch <- prometheus.MustNewConstMetric( + c.vmDynamicMemoryBalancerAvailableMemoryForBalancing, + prometheus.GaugeValue, + utils.MBToBytes(data.VmDynamicMemoryBalancerAvailableMemoryForBalancing), + data.Name, + ) + } ch <- prometheus.MustNewConstMetric( c.vmDynamicMemoryBalancerAveragePressure, diff --git a/internal/collector/hyperv/hyperv_dynamic_memory_vm.go b/internal/collector/hyperv/hyperv_dynamic_memory_vm.go index 86162f88..64511553 100644 --- a/internal/collector/hyperv/hyperv_dynamic_memory_vm.go +++ b/internal/collector/hyperv/hyperv_dynamic_memory_vm.go @@ -18,6 +18,7 @@ package hyperv import ( "fmt" + "github.com/Microsoft/hcsshim/osversion" "github.com/prometheus-community/windows_exporter/internal/pdh" "github.com/prometheus-community/windows_exporter/internal/types" "github.com/prometheus-community/windows_exporter/internal/utils" @@ -47,7 +48,7 @@ type perfDataCounterValuesDynamicMemoryVM struct { // Hyper-V Dynamic Memory VM metrics VmMemoryAddedMemory float64 `perfdata:"Added Memory"` VmMemoryCurrentPressure float64 `perfdata:"Current Pressure"` - VmMemoryGuestAvailableMemory float64 `perfdata:"Guest Available Memory"` + VmMemoryGuestAvailableMemory float64 `perfdata:"Guest Available Memory" perfdata_min_build:"17763"` VmMemoryGuestVisiblePhysicalMemory float64 `perfdata:"Guest Visible Physical Memory"` VmMemoryMaximumPressure float64 `perfdata:"Maximum Pressure"` VmMemoryMemoryAddOperations float64 `perfdata:"Memory Add 
Operations"` @@ -150,12 +151,14 @@ func (c *Collector) collectDynamicMemoryVM(ch chan<- prometheus.Metric) error { data.Name, ) - ch <- prometheus.MustNewConstMetric( - c.vmMemoryGuestAvailableMemory, - prometheus.GaugeValue, - utils.MBToBytes(data.VmMemoryGuestAvailableMemory), - data.Name, - ) + if osversion.Build() >= osversion.LTSC2019 { + ch <- prometheus.MustNewConstMetric( + c.vmMemoryGuestAvailableMemory, + prometheus.GaugeValue, + utils.MBToBytes(data.VmMemoryGuestAvailableMemory), + data.Name, + ) + } ch <- prometheus.MustNewConstMetric( c.vmMemoryGuestVisiblePhysicalMemory, diff --git a/internal/collector/hyperv/hyperv_hypervisor_virtual_processor.go b/internal/collector/hyperv/hyperv_hypervisor_virtual_processor.go index 78ef1561..32f74d19 100644 --- a/internal/collector/hyperv/hyperv_hypervisor_virtual_processor.go +++ b/internal/collector/hyperv/hyperv_hypervisor_virtual_processor.go @@ -40,7 +40,7 @@ type collectorHypervisorVirtualProcessor struct { type perfDataCounterValuesHypervisorVirtualProcessor struct { Name string - HypervisorVirtualProcessorGuestIdleTimePercent float64 `perfdata:"% Guest Idle Time"` + HypervisorVirtualProcessorGuestRunTimePercent float64 `perfdata:"% Guest Run Time"` HypervisorVirtualProcessorHypervisorRunTimePercent float64 `perfdata:"% Hypervisor Run Time"` HypervisorVirtualProcessorTotalRunTimePercent float64 `perfdata:"% Total Run Time"` HypervisorVirtualProcessorRemoteRunTimePercent float64 `perfdata:"% Remote Run Time"` @@ -108,15 +108,15 @@ func (c *Collector) collectHypervisorVirtualProcessor(ch chan<- prometheus.Metri ch <- prometheus.MustNewConstMetric( c.hypervisorVirtualProcessorTimeTotal, prometheus.CounterValue, - data.HypervisorVirtualProcessorGuestIdleTimePercent, - vmName, coreID, "guest_idle", + data.HypervisorVirtualProcessorGuestRunTimePercent, + vmName, coreID, "guest", ) ch <- prometheus.MustNewConstMetric( c.hypervisorVirtualProcessorTimeTotal, prometheus.CounterValue, - data.HypervisorVirtualProcessorGuestIdleTimePercent, - vmName, coreID, "guest_idle", + data.HypervisorVirtualProcessorRemoteRunTimePercent, + vmName, coreID, "remote", ) ch <- prometheus.MustNewConstMetric( From 7252d403aef36778410c3d5099ac7e17204aba2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Wed, 19 Mar 2025 22:50:54 +0100 Subject: [PATCH 05/14] fix: return Windows 11 as product name, if build number is >= 22000 (#1935) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke (cherry picked from commit 041c2cd170d2583b3ee688f3d01c708eef81d503) --- internal/collector/os/os.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/internal/collector/os/os.go b/internal/collector/os/os.go index c5cdbfe9..ddbca422 100644 --- a/internal/collector/os/os.go +++ b/internal/collector/os/os.go @@ -20,6 +20,7 @@ import ( "fmt" "log/slog" "strconv" + "strings" "time" "github.com/alecthomas/kingpin/v2" @@ -117,6 +118,11 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { version := windows.RtlGetVersion() + // Microsoft has decided to keep the major version as "10" for Windows 11, including the product name. + if version.BuildNumber >= 22000 { + productName = strings.Replace(productName, " 10 ", " 11 ", 1) + } + c.osInformation = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "info"), `Contains full product name & version in labels. 
Note that the "major_version" for Windows 11 is \"10\"; a build number greater than 22000 represents Windows 11.`, @@ -371,5 +377,5 @@ func (c *Collector) getWindowsVersion() (string, string, error) { return "", "", err } - return productName, strconv.FormatUint(revision, 10), nil + return strings.TrimSpace(productName), strconv.FormatUint(revision, 10), nil } From b62c724977cac3d3d5c66804443fd434ed1abb72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Thu, 27 Mar 2025 07:26:51 +0100 Subject: [PATCH 06/14] service: fix windows.EnumServicesStatusEx reports buffer too small (#1954) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke (cherry picked from commit 63efa92be7398883072dbb8fc15d11fc09f58621) --- internal/collector/service/service.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/internal/collector/service/service.go b/internal/collector/service/service.go index 1812c137..94a147fb 100644 --- a/internal/collector/service/service.go +++ b/internal/collector/service/service.go @@ -366,9 +366,9 @@ func (c *Collector) collectService(ch chan<- prometheus.Metric, serviceName stri // This is realized by ask Service Manager directly. func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) { var ( - bytesNeeded uint32 - servicesReturned uint32 - err error + additionalBytesNeeded uint32 + servicesReturned uint32 + err error ) for { @@ -381,7 +381,7 @@ func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, e windows.SERVICE_STATE_ALL, &c.queryAllServicesBuffer[0], currentBufferSize, - &bytesNeeded, + &additionalBytesNeeded, &servicesReturned, nil, nil, @@ -395,11 +395,14 @@ func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, e return nil, err } - if bytesNeeded <= currentBufferSize { - return nil, fmt.Errorf("windows.EnumServicesStatusEx reports buffer too small (%d), but buffer is large enough (%d)", currentBufferSize, bytesNeeded) - } + /* + Unlike other WIN32 API calls, additionalBytesNeeded is not returning the absolute amount bytes needed, + but the additional bytes needed relative to the cbBufSize parameter. 
+ ref: + https://stackoverflow.com/questions/14756347/when-calling-enumservicesstatusex-twice-i-still-get-eror-more-data-in-c + */ - c.queryAllServicesBuffer = make([]byte, bytesNeeded) + c.queryAllServicesBuffer = make([]byte, currentBufferSize+additionalBytesNeeded) } if servicesReturned == 0 { From fe17f5f597ac3c2293203759009bd5686ba497b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Fri, 4 Apr 2025 23:21:26 +0200 Subject: [PATCH 07/14] memory: fix panics if metrics does not exists (#1960) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke (cherry picked from commit ecc805f0fa8085c6298d7224d7491d6b83155fc6) Signed-off-by: Jan-Otto Kröpke --- cmd/windows_exporter/main.go | 41 ++-- cmd/windows_exporter/main_test.go | 188 ++++++++++++++++++ config.yaml | 25 +-- internal/collector/ad/ad.go | 14 +- internal/collector/adcs/adcs.go | 14 +- internal/collector/adfs/adfs.go | 14 +- internal/collector/cache/cache.go | 14 +- internal/collector/cpu/cpu.go | 14 +- internal/collector/cpu_info/cpu_info.go | 24 +-- internal/collector/dfsr/dfsr.go | 48 ++--- internal/collector/dhcp/dhcp.go | 152 +++++++------- internal/collector/diskdrive/diskdrive.go | 24 +-- internal/collector/dns/dns.go | 14 +- internal/collector/iis/iis.go | 4 +- .../collector/logical_disk/logical_disk.go | 14 +- internal/collector/memory/memory.go | 18 +- internal/collector/mscluster/mscluster.go | 4 +- internal/collector/msmq/msmq.go | 14 +- internal/collector/net/net.go | 38 ++-- internal/collector/nps/nps.go | 30 +-- internal/collector/os/os.go | 2 +- internal/collector/pagefile/pagefile.go | 14 +- .../collector/physical_disk/physical_disk.go | 14 +- internal/collector/printer/printer.go | 38 ++-- internal/collector/remote_fx/remote_fx.go | 30 +-- internal/collector/smb/smb.go | 14 +- internal/collector/smbclient/smbclient.go | 14 +- internal/collector/smtp/smtp.go | 14 +- internal/collector/system/system.go | 14 +- internal/collector/tcp/tcp.go | 26 +-- .../terminal_services/terminal_services.go | 34 ++-- .../collector/textfile/textfile_test_test.go | 4 +- internal/collector/thermalzone/thermalzone.go | 14 +- internal/collector/time/time.go | 2 +- internal/collector/udp/udp.go | 24 +-- internal/collector/vmware/vmware.go | 2 +- internal/types/errors.go | 1 + internal/utils/testutils/testutils.go | 2 +- pkg/collector/collection.go | 4 +- 39 files changed, 576 insertions(+), 399 deletions(-) create mode 100644 cmd/windows_exporter/main_test.go diff --git a/cmd/windows_exporter/main.go b/cmd/windows_exporter/main.go index a652160f..e0f86fdd 100644 --- a/cmd/windows_exporter/main.go +++ b/cmd/windows_exporter/main.go @@ -47,7 +47,11 @@ import ( ) func main() { - exitCode := run() + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill) + + exitCode := run(ctx, os.Args[1:]) + + stop() // If we are running as a service, we need to signal the service control manager that we are done. if !IsService { @@ -60,9 +64,8 @@ func main() { <-serviceManagerFinishedCh } -func run() int { +func run(ctx context.Context, args []string) int { startTime := time.Now() - ctx := context.Background() app := kingpin.New("windows_exporter", "A metrics collector for Windows.") @@ -126,7 +129,7 @@ func run() int { // to load the specified file(s). 
if _, err := app.Parse(os.Args[1:]); err != nil { //nolint:sloglint // we do not have an logger yet - slog.Error("Failed to parse CLI args", + slog.LogAttrs(ctx, slog.LevelError, "Failed to parse CLI args", slog.Any("err", err), ) @@ -137,8 +140,7 @@ func run() int { logger, err := log.New(logConfig) if err != nil { - //nolint:sloglint // we do not have an logger yet - slog.Error("failed to create logger", + logger.LogAttrs(ctx, slog.LevelError, "failed to create logger", slog.Any("err", err), ) @@ -191,8 +193,8 @@ func run() int { logger.LogAttrs(ctx, slog.LevelDebug, "logging has Started") - if err = setPriorityWindows(logger, os.Getpid(), *processPriority); err != nil { - logger.Error("failed to set process priority", + if err = setPriorityWindows(ctx, logger, os.Getpid(), *processPriority); err != nil { + logger.LogAttrs(ctx, slog.LevelError, "failed to set process priority", slog.Any("err", err), ) @@ -201,7 +203,7 @@ func run() int { enabledCollectorList := expandEnabledCollectors(*enabledCollectors) if err := collectors.Enable(enabledCollectorList); err != nil { - logger.Error("couldn't enable collectors", + logger.LogAttrs(ctx, slog.LevelError, "couldn't enable collectors", slog.Any("err", err), ) @@ -209,9 +211,9 @@ func run() int { } // Initialize collectors before loading - if err = collectors.Build(logger); err != nil { + if err = collectors.Build(ctx, logger); err != nil { for _, err := range utils.SplitError(err) { - logger.Error("couldn't initialize collector", + logger.LogAttrs(ctx, slog.LevelError, "couldn't initialize collector", slog.Any("err", err), ) @@ -266,17 +268,14 @@ func run() int { close(errCh) }() - ctx, stop := signal.NotifyContext(ctx, os.Interrupt, os.Kill) - defer stop() - select { case <-ctx.Done(): - logger.Info("Shutting down windows_exporter via kill signal") + logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via kill signal") case <-stopCh: - logger.Info("Shutting down windows_exporter via service control") + logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via service control") case err := <-errCh: if err != nil { - logger.ErrorContext(ctx, "Failed to start windows_exporter", + logger.LogAttrs(ctx, slog.LevelError, "Failed to start windows_exporter", slog.Any("err", err), ) @@ -287,9 +286,9 @@ func run() int { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - _ = server.Shutdown(ctx) + _ = server.Shutdown(ctx) //nolint:contextcheck // create a new context for server shutdown - logger.InfoContext(ctx, "windows_exporter has shut down") + logger.LogAttrs(ctx, slog.LevelInfo, "windows_exporter has shut down") //nolint:contextcheck return 0 } @@ -312,7 +311,7 @@ func logCurrentUser(logger *slog.Logger) { } // setPriorityWindows sets the priority of the current process to the specified value. -func setPriorityWindows(logger *slog.Logger, pid int, priority string) error { +func setPriorityWindows(ctx context.Context, logger *slog.Logger, pid int, priority string) error { // Mapping of priority names to uin32 values required by windows.SetPriorityClass. 
priorityStringToInt := map[string]uint32{ "realtime": windows.REALTIME_PRIORITY_CLASS, @@ -330,7 +329,7 @@ func setPriorityWindows(logger *slog.Logger, pid int, priority string) error { return nil } - logger.LogAttrs(context.Background(), slog.LevelDebug, "setting process priority to "+priority) + logger.LogAttrs(ctx, slog.LevelDebug, "setting process priority to "+priority) // https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights handle, err := windows.OpenProcess( diff --git a/cmd/windows_exporter/main_test.go b/cmd/windows_exporter/main_test.go new file mode 100644 index 00000000..af81f454 --- /dev/null +++ b/cmd/windows_exporter/main_test.go @@ -0,0 +1,188 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package main + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "golang.org/x/sys/windows" +) + +//nolint:tparallel +func TestRun(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + name string + args []string + config string + metricsEndpoint string + exitCode int + }{ + { + name: "default", + args: []string{}, + metricsEndpoint: "http://127.0.0.1:9182/metrics", + }, + { + name: "web.listen-address", + args: []string{"--web.listen-address=127.0.0.1:8080"}, + metricsEndpoint: "http://127.0.0.1:8080/metrics", + }, + { + name: "web.listen-address", + args: []string{"--web.listen-address=127.0.0.1:8081", "--web.listen-address=[::1]:8081"}, + metricsEndpoint: "http://[::1]:8081/metrics", + }, + { + name: "config", + args: []string{"--config.file=config.yaml"}, + config: `{"web":{"listen-address":"127.0.0.1:8082"}}`, + metricsEndpoint: "http://127.0.0.1:8082/metrics", + }, + { + name: "web.listen-address with config", + args: []string{"--config.file=config.yaml", "--web.listen-address=127.0.0.1:8084"}, + config: `{"web":{"listen-address":"127.0.0.1:8083"}}`, + metricsEndpoint: "http://127.0.0.1:8084/metrics", + }, + } { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + if tc.config != "" { + // Create a temporary config file. + tmpfile, err := os.CreateTemp(t.TempDir(), "config-*.yaml") + require.NoError(t, err) + + t.Cleanup(func() { + require.NoError(t, tmpfile.Close()) + }) + + _, err = tmpfile.WriteString(tc.config) + require.NoError(t, err) + + for i, arg := range tc.args { + tc.args[i] = strings.ReplaceAll(arg, "config.yaml", tmpfile.Name()) + } + } + + exitCodeCh := make(chan int) + + var stdout string + + go func() { + stdout = captureOutput(t, func() { + // Simulate the service control manager signaling that we are done. 
+ exitCodeCh <- run(ctx, tc.args) + }) + }() + + t.Cleanup(func() { + select { + case exitCode := <-exitCodeCh: + require.Equal(t, tc.exitCode, exitCode) + case <-time.After(2 * time.Second): + t.Fatalf("timed out waiting for exit code, want %d", tc.exitCode) + } + }) + + if tc.exitCode != 0 { + return + } + + uri, err := url.Parse(tc.metricsEndpoint) + require.NoError(t, err) + + err = waitUntilListening(t, "tcp", uri.Host) + require.NoError(t, err, "LOGS:\n%s", stdout) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, tc.metricsEndpoint, nil) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err, "LOGS:\n%s", stdout) + require.Equal(t, http.StatusOK, resp.StatusCode) + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + err = resp.Body.Close() + require.NoError(t, err) + + require.NotEmpty(t, body) + require.Contains(t, string(body), "# HELP windows_exporter_build_info") + + cancel() + }) + } +} + +func captureOutput(tb testing.TB, f func()) string { + tb.Helper() + + orig := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + f() + + os.Stdout = orig + + _ = w.Close() + + out, _ := io.ReadAll(r) + + return string(out) +} + +func waitUntilListening(tb testing.TB, network, address string) error { + tb.Helper() + + var ( + conn net.Conn + err error + ) + + for range 10 { + conn, err = net.DialTimeout(network, address, 100*time.Millisecond) + if err == nil { + _ = conn.Close() + + return nil + } + + if errors.Is(err, windows.Errno(10061)) { + time.Sleep(50 * time.Millisecond) + + continue + } + } + + return fmt.Errorf("listener not listening: %w", err) +} diff --git a/config.yaml b/config.yaml index c4f16ee6..82dd77a9 100644 --- a/config.yaml +++ b/config.yaml @@ -1,23 +1,2 @@ -# example configuration file for windows_exporter - -collectors: - enabled: cpu,cpu_info,exchange,iis,logical_disk,logon,memory,net,os,performancecounter,process,remote_fx,service,system,tcp,time,terminal_services,textfile -collector: - service: - include: "windows_exporter" - performancecounter: - objects: |- - - name: photon_udp - object: "Photon Socket Server: UDP" - instances: ["*"] - counters: - - name: "UDP: Datagrams in" - metric: "photon_udp_datagrams" - labels: - direction: "in" - - name: "UDP: Datagrams out" - metric: "photon_udp_datagrams" - labels: - direction: "out" -log: - level: warn +web: + listen-address: ":9183" diff --git a/internal/collector/ad/ad.go b/internal/collector/ad/ad.go index ab48432d..ad728e81 100644 --- a/internal/collector/ad/ad.go +++ b/internal/collector/ad/ad.go @@ -130,13 +130,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create DirectoryServices collector: %w", err) - } - c.addressBookOperationsTotal = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"), "", @@ -511,6 +504,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create DirectoryServices collector: %w", err) + } + return nil } diff --git a/internal/collector/adcs/adcs.go b/internal/collector/adcs/adcs.go index ca1125ec..44a68e40 
100644 --- a/internal/collector/adcs/adcs.go +++ b/internal/collector/adcs/adcs.go @@ -82,13 +82,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create Certification Authority collector: %w", err) - } - c.requestsPerSecond = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "requests_total"), "Total certificate requests processed", @@ -168,6 +161,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create Certification Authority collector: %w", err) + } + return nil } diff --git a/internal/collector/adfs/adfs.go b/internal/collector/adfs/adfs.go index 963233e2..3e17fdcc 100644 --- a/internal/collector/adfs/adfs.go +++ b/internal/collector/adfs/adfs.go @@ -112,13 +112,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil) - if err != nil { - return fmt.Errorf("failed to create AD FS collector: %w", err) - } - c.adLoginConnectionFailures = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"), "Total number of connection failures to an Active Directory domain controller", @@ -378,6 +371,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil) + if err != nil { + return fmt.Errorf("failed to create AD FS collector: %w", err) + } + return nil } diff --git a/internal/collector/cache/cache.go b/internal/collector/cache/cache.go index fe35c6d0..6af92de5 100644 --- a/internal/collector/cache/cache.go +++ b/internal/collector/cache/cache.go @@ -98,13 +98,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create Cache collector: %w", err) - } - c.asyncCopyReadsTotal = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"), "(AsyncCopyReadsTotal)", @@ -280,6 +273,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create Cache collector: %w", err) + } + return nil } diff --git a/internal/collector/cpu/cpu.go b/internal/collector/cpu/cpu.go index a1d7e93e..c8b28da6 100644 --- a/internal/collector/cpu/cpu.go +++ b/internal/collector/cpu/cpu.go @@ -89,15 +89,8 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - c.mu = sync.Mutex{} - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll) - if err != nil { - return 
fmt.Errorf("failed to create Processor Information collector: %w", err) - } - c.logicalProcessors = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "logical_processor"), "Total number of logical processors", @@ -186,6 +179,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { c.processorRTCValues = map[string]utils.Counter{} c.processorMPerfValues = map[string]utils.Counter{} + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create Processor Information collector: %w", err) + } + return nil } diff --git a/internal/collector/cpu_info/cpu_info.go b/internal/collector/cpu_info/cpu_info.go index df513d60..daef6410 100644 --- a/internal/collector/cpu_info/cpu_info.go +++ b/internal/collector/cpu_info/cpu_info.go @@ -75,18 +75,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error { - if miSession == nil { - return errors.New("miSession is nil") - } - - miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor") - if err != nil { - return fmt.Errorf("failed to create WMI query: %w", err) - } - - c.miQuery = miQuery - c.miSession = miSession - c.cpuInfo = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, "", Name), "Labelled CPU information as provided by Win32_Processor", @@ -148,6 +136,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error { nil, ) + if miSession == nil { + return errors.New("miSession is nil") + } + + miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor") + if err != nil { + return fmt.Errorf("failed to create WMI query: %w", err) + } + + c.miQuery = miQuery + c.miSession = miSession + var dst []miProcessor if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil { return fmt.Errorf("WMI query failed: %w", err) diff --git a/internal/collector/dfsr/dfsr.go b/internal/collector/dfsr/dfsr.go index 7863f7d0..07f14943 100644 --- a/internal/collector/dfsr/dfsr.go +++ b/internal/collector/dfsr/dfsr.go @@ -160,29 +160,6 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { logger.Info("dfsr collector is in an experimental state! 
Metrics for this collector have not been tested.") - var err error - - if slices.Contains(c.config.CollectorsEnabled, "connection") { - c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err) - } - } - - if slices.Contains(c.config.CollectorsEnabled, "folder") { - c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err) - } - } - - if slices.Contains(c.config.CollectorsEnabled, "volume") { - c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err) - } - } - // connection c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"), @@ -473,13 +450,36 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + if slices.Contains(c.config.CollectorsEnabled, "connection") { + c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err) + } + } + + if slices.Contains(c.config.CollectorsEnabled, "folder") { + c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err) + } + } + + if slices.Contains(c.config.CollectorsEnabled, "volume") { + c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err) + } + } + return nil } // Collect implements the Collector interface. // Sends metric values for each metric to the provided prometheus Metric channel. 
func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 3) + errs := make([]error, 0) if slices.Contains(c.config.CollectorsEnabled, "connection") { errs = append(errs, c.collectPDHConnection(ch)) diff --git a/internal/collector/dhcp/dhcp.go b/internal/collector/dhcp/dhcp.go index 49991e64..122547f6 100644 --- a/internal/collector/dhcp/dhcp.go +++ b/internal/collector/dhcp/dhcp.go @@ -148,12 +148,79 @@ func (c *Collector) Close() error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { var err error - if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) { - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil) - if err != nil { - return fmt.Errorf("failed to create DHCP Server collector: %w", err) - } + if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) { + c.scopeInfo = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_info"), + "DHCP Scope information", + []string{"name", "superscope_name", "superscope_id", "scope"}, + nil, + ) + c.scopeState = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_state"), + "DHCP Scope state", + []string{"scope", "state"}, + nil, + ) + + c.scopeAddressesFreeTotal = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free"), + "DHCP Scope free addresses", + []string{"scope"}, + nil, + ) + + c.scopeAddressesFreeOnPartnerServerTotal = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_partner_server"), + "DHCP Scope free addresses on partner server", + []string{"scope"}, + nil, + ) + + c.scopeAddressesFreeOnThisServerTotal = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_this_server"), + "DHCP Scope free addresses on this server", + []string{"scope"}, + nil, + ) + + c.scopeAddressesInUseTotal = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use"), + "DHCP Scope addresses in use", + []string{"scope"}, + nil, + ) + + c.scopeAddressesInUseOnPartnerServerTotal = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_partner_server"), + "DHCP Scope addresses in use on partner server", + []string{"scope"}, + nil, + ) + + c.scopeAddressesInUseOnThisServerTotal = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_this_server"), + "DHCP Scope addresses in use on this server", + []string{"scope"}, + nil, + ) + + c.scopePendingOffersTotal = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_pending_offers"), + "DHCP Scope pending offers", + []string{"scope"}, + nil, + ) + + c.scopeReservedAddressTotal = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "scope_reserved_address"), + "DHCP Scope reserved addresses", + []string{"scope"}, + nil, + ) + } + + if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) { c.packetsReceivedTotal = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"), "Total number of packets received by the DHCP server (PacketsReceivedTotal)", @@ -304,78 +371,11 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, nil, ) - } - if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) { - c.scopeInfo = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_info"), - 
"DHCP Scope information", - []string{"name", "superscope_name", "superscope_id", "scope"}, - nil, - ) - - c.scopeState = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_state"), - "DHCP Scope state", - []string{"scope", "state"}, - nil, - ) - - c.scopeAddressesFreeTotal = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free"), - "DHCP Scope free addresses", - []string{"scope"}, - nil, - ) - - c.scopeAddressesFreeOnPartnerServerTotal = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_partner_server"), - "DHCP Scope free addresses on partner server", - []string{"scope"}, - nil, - ) - - c.scopeAddressesFreeOnThisServerTotal = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_this_server"), - "DHCP Scope free addresses on this server", - []string{"scope"}, - nil, - ) - - c.scopeAddressesInUseTotal = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use"), - "DHCP Scope addresses in use", - []string{"scope"}, - nil, - ) - - c.scopeAddressesInUseOnPartnerServerTotal = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_partner_server"), - "DHCP Scope addresses in use on partner server", - []string{"scope"}, - nil, - ) - - c.scopeAddressesInUseOnThisServerTotal = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_this_server"), - "DHCP Scope addresses in use on this server", - []string{"scope"}, - nil, - ) - - c.scopePendingOffersTotal = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_pending_offers"), - "DHCP Scope pending offers", - []string{"scope"}, - nil, - ) - - c.scopeReservedAddressTotal = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "scope_reserved_address"), - "DHCP Scope reserved addresses", - []string{"scope"}, - nil, - ) + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil) + if err != nil { + return fmt.Errorf("failed to create DHCP Server collector: %w", err) + } } return nil diff --git a/internal/collector/diskdrive/diskdrive.go b/internal/collector/diskdrive/diskdrive.go index a95c3f85..49947059 100644 --- a/internal/collector/diskdrive/diskdrive.go +++ b/internal/collector/diskdrive/diskdrive.go @@ -72,18 +72,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error { - if miSession == nil { - return errors.New("miSession is nil") - } - - miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive") - if err != nil { - return fmt.Errorf("failed to create WMI query: %w", err) - } - - c.miQuery = miQuery - c.miSession = miSession - c.diskInfo = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "info"), "General drive information", @@ -120,6 +108,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error { nil, ) + if miSession == nil { + return errors.New("miSession is nil") + } + + miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive") + if err != nil { + return fmt.Errorf("failed to create WMI query: %w", err) + } + + c.miQuery = miQuery + c.miSession = miSession + var dst []diskDrive if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil { return 
fmt.Errorf("WMI query failed: %w", err) diff --git a/internal/collector/dns/dns.go b/internal/collector/dns/dns.go index 0a9ed82d..e93224be 100644 --- a/internal/collector/dns/dns.go +++ b/internal/collector/dns/dns.go @@ -91,13 +91,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create DNS collector: %w", err) - } - c.zoneTransferRequestsReceived = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"), "Number of zone transfer requests (AXFR/IXFR) received by the master DNS server", @@ -231,6 +224,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create DNS collector: %w", err) + } + return nil } diff --git a/internal/collector/iis/iis.go b/internal/collector/iis/iis.go index 9e66b4ca..d964a4f0 100644 --- a/internal/collector/iis/iis.go +++ b/internal/collector/iis/iis.go @@ -167,7 +167,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { prometheus.Labels{"version": fmt.Sprintf("%d.%d", c.iisVersion.major, c.iisVersion.minor)}, ) - errs := make([]error, 0, 4) + errs := make([]error, 0) if err := c.buildWebService(); err != nil { errs = append(errs, fmt.Errorf("failed to build Web Service collector: %w", err)) @@ -247,7 +247,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error { 1, ) - errs := make([]error, 0, 4) + errs := make([]error, 0) if err := c.collectWebService(ch); err != nil { errs = append(errs, fmt.Errorf("failed to collect Web Service metrics: %w", err)) diff --git a/internal/collector/logical_disk/logical_disk.go b/internal/collector/logical_disk/logical_disk.go index 1075438f..bd16553e 100644 --- a/internal/collector/logical_disk/logical_disk.go +++ b/internal/collector/logical_disk/logical_disk.go @@ -150,13 +150,6 @@ func (c *Collector) Close() error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { c.logger = logger.With(slog.String("collector", Name)) - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create LogicalDisk collector: %w", err) - } - c.information = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "info"), "A metric with a constant '1' value labeled with logical disk information", @@ -281,6 +274,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create LogicalDisk collector: %w", err) + } + return nil } diff --git a/internal/collector/memory/memory.go b/internal/collector/memory/memory.go index 3c779e83..20f3a392 100644 --- a/internal/collector/memory/memory.go +++ b/internal/collector/memory/memory.go @@ -110,13 +110,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, 
"Memory", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create Memory collector: %w", err) - } - c.availableBytes = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "available_bytes"), "The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+ @@ -340,13 +333,20 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create Memory collector: %w", err) + } + return nil } // Collect sends the metric values for each metric // to the provided prometheus Metric channel. func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 2) + errs := make([]error, 0) if err := c.collectPDH(ch); err != nil { errs = append(errs, fmt.Errorf("failed collecting memory metrics: %w", err)) @@ -390,6 +390,8 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error { err := c.perfDataCollector.Collect(&c.perfDataObject) if err != nil { return fmt.Errorf("failed to collect Memory metrics: %w", err) + } else if len(c.perfDataObject) == 0 { + return fmt.Errorf("failed to collect Memory metrics: %w", types.ErrNoDataUnexpected) } ch <- prometheus.MustNewConstMetric( diff --git a/internal/collector/mscluster/mscluster.go b/internal/collector/mscluster/mscluster.go index 9ebc5450..0b6758e0 100644 --- a/internal/collector/mscluster/mscluster.go +++ b/internal/collector/mscluster/mscluster.go @@ -122,7 +122,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error { c.miSession = miSession - errs := make([]error, 0, 5) + errs := make([]error, 0) if slices.Contains(c.config.CollectorsEnabled, subCollectorCluster) { if err := c.buildCluster(); err != nil { @@ -227,7 +227,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error { wg.Wait() close(errCh) - errs := make([]error, 0, 5) + errs := make([]error, 0) for err := range errCh { errs = append(errs, err) diff --git a/internal/collector/msmq/msmq.go b/internal/collector/msmq/msmq.go index 2111c6bf..bbee313d 100644 --- a/internal/collector/msmq/msmq.go +++ b/internal/collector/msmq/msmq.go @@ -74,13 +74,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create MSMQ Queue collector: %w", err) - } - c.bytesInJournalQueue = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "bytes_in_journal_queue"), "Size of queue journal in bytes", @@ -106,6 +99,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create MSMQ Queue collector: %w", err) + } + return nil } diff --git a/internal/collector/net/net.go b/internal/collector/net/net.go index 7e445c09..e3af2a39 100644 --- a/internal/collector/net/net.go +++ b/internal/collector/net/net.go @@ -157,17 +157,12 @@ func (c *Collector) Close() error { } func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = 
pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Network Interface", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create Network Interface collector: %w", err) - } - - if slices.Contains(c.config.CollectorsEnabled, "addresses") { - logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.", - slog.String("collector", Name), - ) + for _, collector := range c.config.CollectorsEnabled { + if !slices.Contains([]string{subCollectorMetrics, subCollectorNicInfo}, collector) { + return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector, + strings.Join([]string{subCollectorMetrics, subCollectorNicInfo}, ", "), + ) + } } c.bytesReceivedTotal = prometheus.NewDesc( @@ -261,22 +256,35 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Network Interface", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create Network Interface collector: %w", err) + } + + if slices.Contains(c.config.CollectorsEnabled, subCollectorNicInfo) { + logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.", + slog.String("collector", Name), + ) + } + return nil } // Collect sends the metric values for each metric // to the provided prometheus Metric channel. func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 2) + errs := make([]error, 0) - if slices.Contains(c.config.CollectorsEnabled, "metrics") { + if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) { if err := c.collect(ch); err != nil { errs = append(errs, fmt.Errorf("failed collecting metrics: %w", err)) } } - if slices.Contains(c.config.CollectorsEnabled, "nic_addresses") { - if err := c.collectNICAddresses(ch); err != nil { + if slices.Contains(c.config.CollectorsEnabled, subCollectorNicInfo) { + if err := c.collectNICInfo(ch); err != nil { errs = append(errs, fmt.Errorf("failed collecting net addresses: %w", err)) } } diff --git a/internal/collector/nps/nps.go b/internal/collector/nps/nps.go index abaec15f..62b857bb 100644 --- a/internal/collector/nps/nps.go +++ b/internal/collector/nps/nps.go @@ -94,20 +94,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - errs := make([]error, 0, 2) - - c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil) - if err != nil { - errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err)) - } - - c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil) - if err != nil { - errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err)) - } - c.accessAccepts = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "access_accepts"), "(AccessAccepts)", @@ -260,13 +246,27 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + errs := make([]error, 0) + + c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil) + if err != nil { + errs = append(errs, fmt.Errorf("failed to create NPS 
Authentication Server collector: %w", err)) + } + + c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil) + if err != nil { + errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err)) + } + return errors.Join(errs...) } // Collect sends the metric values for each metric // to the provided prometheus Metric channel. func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 2) + errs := make([]error, 0) if err := c.collectAccept(ch); err != nil { errs = append(errs, fmt.Errorf("failed collecting NPS accept data: %w", err)) diff --git a/internal/collector/os/os.go b/internal/collector/os/os.go index ddbca422..c7f70e2f 100644 --- a/internal/collector/os/os.go +++ b/internal/collector/os/os.go @@ -209,7 +209,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { // Collect sends the metric values for each metric // to the provided prometheus Metric channel. func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 4) + errs := make([]error, 0) c.collect(ch) diff --git a/internal/collector/pagefile/pagefile.go b/internal/collector/pagefile/pagefile.go index 849e1319..1af80dc2 100644 --- a/internal/collector/pagefile/pagefile.go +++ b/internal/collector/pagefile/pagefile.go @@ -74,13 +74,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create Paging File collector: %w", err) - } - c.pagingLimitBytes = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "limit_bytes"), "Number of bytes that can be stored in the operating system paging files. 
0 (zero) indicates that there are no paging files", @@ -95,6 +88,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create Paging File collector: %w", err) + } + return nil } diff --git a/internal/collector/physical_disk/physical_disk.go b/internal/collector/physical_disk/physical_disk.go index 431aad5a..f2b6e612 100644 --- a/internal/collector/physical_disk/physical_disk.go +++ b/internal/collector/physical_disk/physical_disk.go @@ -127,13 +127,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create PhysicalDisk collector: %w", err) - } - c.requestsQueued = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "requests_queued"), "The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength)", @@ -218,6 +211,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create PhysicalDisk collector: %w", err) + } + return nil } diff --git a/internal/collector/printer/printer.go b/internal/collector/printer/printer.go index d58a1ec7..105c9b6e 100644 --- a/internal/collector/printer/printer.go +++ b/internal/collector/printer/printer.go @@ -126,25 +126,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error { - if miSession == nil { - return errors.New("miSession is nil") - } - - miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer") - if err != nil { - return fmt.Errorf("failed to create WMI query: %w", err) - } - - c.miQueryPrinter = miQuery - - miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob") - if err != nil { - return fmt.Errorf("failed to create WMI query: %w", err) - } - - c.miQueryPrinterJobs = miQuery - c.miSession = miSession - c.printerJobStatus = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "job_status"), "A counter of printer jobs by status", @@ -164,6 +145,25 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error { nil, ) + if miSession == nil { + return errors.New("miSession is nil") + } + + miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer") + if err != nil { + return fmt.Errorf("failed to create WMI query: %w", err) + } + + c.miQueryPrinter = miQuery + + miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob") + if err != nil { + return fmt.Errorf("failed to create WMI query: %w", err) + } + + c.miQueryPrinterJobs = miQuery + c.miSession = miSession + return nil } diff --git a/internal/collector/remote_fx/remote_fx.go b/internal/collector/remote_fx/remote_fx.go index d43197d6..8ea9149c 100644 --- a/internal/collector/remote_fx/remote_fx.go +++ b/internal/collector/remote_fx/remote_fx.go @@ -102,18 +102,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(*slog.Logger, *mi.Session) error { - var err error - - 
c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create RemoteFX Network collector: %w", err) - } - - c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err) - } - // net c.baseTCPRTT = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"), @@ -238,13 +226,27 @@ func (c *Collector) Build(*slog.Logger, *mi.Session) error { nil, ) - return nil + var err error + + errs := make([]error, 0) + + c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll) + if err != nil { + errs = append(errs, fmt.Errorf("failed to create RemoteFX Network collector: %w", err)) + } + + c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll) + if err != nil { + errs = append(errs, fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err)) + } + + return errors.Join(errs...) } // Collect sends the metric values for each metric // to the provided prometheus Metric channel. func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 2) + errs := make([]error, 0) if err := c.collectRemoteFXNetworkCount(ch); err != nil { errs = append(errs, fmt.Errorf("failed collecting RemoteFX Network metrics: %w", err)) diff --git a/internal/collector/smb/smb.go b/internal/collector/smb/smb.go index bc9a183e..ce9ac8f7 100644 --- a/internal/collector/smb/smb.go +++ b/internal/collector/smb/smb.go @@ -76,13 +76,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create SMB Server Shares collector: %w", err) - } - c.currentOpenFileCount = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "server_shares_current_open_file_count"), "Current total count open files on the SMB Server Share", @@ -132,6 +125,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create SMB Server Shares collector: %w", err) + } + return nil } diff --git a/internal/collector/smbclient/smbclient.go b/internal/collector/smbclient/smbclient.go index c6d11704..6f0bc809 100644 --- a/internal/collector/smbclient/smbclient.go +++ b/internal/collector/smbclient/smbclient.go @@ -91,13 +91,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create SMB Client Shares collector: %w", err) - } - // desc creates a new prometheus description desc := func(metricName string, description string, labels []string) *prometheus.Desc { return prometheus.NewDesc( @@ -193,6 +186,13 @@ func (c *Collector) 
Build(_ *slog.Logger, _ *mi.Session) error { []string{"server", "share"}, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create SMB Client Shares collector: %w", err) + } + return nil } diff --git a/internal/collector/smtp/smtp.go b/internal/collector/smtp/smtp.go index e0049067..1250c293 100644 --- a/internal/collector/smtp/smtp.go +++ b/internal/collector/smtp/smtp.go @@ -157,13 +157,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create SMTP Server collector: %w", err) - } - logger.Info("smtp collector is in an experimental state! Metrics for this collector have not been tested.", slog.String("collector", Name), ) @@ -421,6 +414,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create SMTP Server collector: %w", err) + } + return nil } diff --git a/internal/collector/system/system.go b/internal/collector/system/system.go index dec0eedd..c3818929 100644 --- a/internal/collector/system/system.go +++ b/internal/collector/system/system.go @@ -77,13 +77,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil) - if err != nil { - return fmt.Errorf("failed to create System collector: %w", err) - } - c.bootTime = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"), "Unix timestamp of system boot time", @@ -134,6 +127,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil) + if err != nil { + return fmt.Errorf("failed to create System collector: %w", err) + } + return nil } diff --git a/internal/collector/tcp/tcp.go b/internal/collector/tcp/tcp.go index b54c0513..86f90252 100644 --- a/internal/collector/tcp/tcp.go +++ b/internal/collector/tcp/tcp.go @@ -118,18 +118,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv4", nil) - if err != nil { - return fmt.Errorf("failed to create TCPv4 collector: %w", err) - } - - c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv6", nil) - if err != nil { - return fmt.Errorf("failed to create TCPv6 collector: %w", err) - } - c.connectionFailures = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "connection_failures_total"), "(TCP.ConnectionFailures)", @@ -190,13 +178,25 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { []string{"af", "state"}, nil, ) + var err error + + c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv4", nil) + if err != nil { + return fmt.Errorf("failed to create TCPv4 collector: %w", 
err) + } + + c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv6", nil) + if err != nil { + return fmt.Errorf("failed to create TCPv6 collector: %w", err) + } + return nil } // Collect sends the metric values for each metric // to the provided prometheus Metric channel. func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 2) + errs := make([]error, 0) if slices.Contains(c.config.CollectorsEnabled, "metrics") { if err := c.collect(ch); err != nil { diff --git a/internal/collector/terminal_services/terminal_services.go b/internal/collector/terminal_services/terminal_services.go index 107345a2..6e9bcd08 100644 --- a/internal/collector/terminal_services/terminal_services.go +++ b/internal/collector/terminal_services/terminal_services.go @@ -133,25 +133,8 @@ func (c *Collector) Close() error { } func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error { - if miSession == nil { - return errors.New("miSession is nil") - } - c.logger = logger.With(slog.String("collector", Name)) - c.connectionBrokerEnabled = isConnectionBrokerServer(miSession) - - if c.connectionBrokerEnabled { - var err error - - c.perfDataCollectorBroker, err = pdh.NewCollector[perfDataCounterValuesBroker](pdh.CounterTypeRaw, "Remote Desktop Connection Broker Counterset", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err) - } - } else { - logger.Debug("host is not a connection broker skipping Connection Broker performance metrics.") - } - c.sessionInfo = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "session_info"), "Terminal Services sessions info", @@ -243,8 +226,23 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error { nil, ) + if miSession == nil { + return errors.New("miSession is nil") + } + var err error + c.connectionBrokerEnabled = isConnectionBrokerServer(miSession) + + if c.connectionBrokerEnabled { + c.perfDataCollectorBroker, err = pdh.NewCollector[perfDataCounterValuesBroker](pdh.CounterTypeRaw, "Remote Desktop Connection Broker Counterset", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err) + } + } else { + logger.Debug("host is not a connection broker skipping Connection Broker performance metrics.") + } + c.hServer, err = wtsapi32.WTSOpenServer("") if err != nil { return fmt.Errorf("failed to open WTS server: %w", err) @@ -261,7 +259,7 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error { // Collect sends the metric values for each metric // to the provided prometheus Metric channel. 
func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 3) + errs := make([]error, 0) if err := c.collectWTSSessions(ch); err != nil { errs = append(errs, fmt.Errorf("failed collecting terminal services session infos: %w", err)) diff --git a/internal/collector/textfile/textfile_test_test.go b/internal/collector/textfile/textfile_test_test.go index 0be304db..e71f8fac 100644 --- a/internal/collector/textfile/textfile_test_test.go +++ b/internal/collector/textfile/textfile_test_test.go @@ -44,7 +44,7 @@ func TestMultipleDirectories(t *testing.T) { }) collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector}) - require.NoError(t, collectors.Build(logger)) + require.NoError(t, collectors.Build(t.Context(), logger)) metrics := make(chan prometheus.Metric) got := "" @@ -81,7 +81,7 @@ func TestDuplicateFileName(t *testing.T) { }) collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector}) - require.NoError(t, collectors.Build(logger)) + require.NoError(t, collectors.Build(t.Context(), logger)) metrics := make(chan prometheus.Metric) got := "" diff --git a/internal/collector/thermalzone/thermalzone.go b/internal/collector/thermalzone/thermalzone.go index 0f14ba73..9536d4f5 100644 --- a/internal/collector/thermalzone/thermalzone.go +++ b/internal/collector/thermalzone/thermalzone.go @@ -70,13 +70,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Thermal Zone Information", pdh.InstancesAll) - if err != nil { - return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err) - } - c.temperature = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "temperature_celsius"), "(Temperature)", @@ -102,6 +95,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Thermal Zone Information", pdh.InstancesAll) + if err != nil { + return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err) + } + return nil } diff --git a/internal/collector/time/time.go b/internal/collector/time/time.go index 6c8273f0..80f31a26 100644 --- a/internal/collector/time/time.go +++ b/internal/collector/time/time.go @@ -200,7 +200,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { // Collect sends the metric values for each metric // to the provided prometheus Metric channel. 
func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 2) + errs := make([]error, 0) if slices.Contains(c.config.CollectorsEnabled, collectorSystemTime) { if err := c.collectTime(ch); err != nil { diff --git a/internal/collector/udp/udp.go b/internal/collector/udp/udp.go index aac6a4e9..5227414e 100644 --- a/internal/collector/udp/udp.go +++ b/internal/collector/udp/udp.go @@ -80,18 +80,6 @@ func (c *Collector) Close() error { } func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { - var err error - - c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv4", nil) - if err != nil { - return fmt.Errorf("failed to create UDPv4 collector: %w", err) - } - - c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv6", nil) - if err != nil { - return fmt.Errorf("failed to create UDPv6 collector: %w", err) - } - c.datagramsNoPortTotal = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "datagram_no_port_total"), "Number of received UDP datagrams for which there was no application at the destination port", @@ -117,6 +105,18 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + var err error + + c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv4", nil) + if err != nil { + return fmt.Errorf("failed to create UDPv4 collector: %w", err) + } + + c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv6", nil) + if err != nil { + return fmt.Errorf("failed to create UDPv6 collector: %w", err) + } + return nil } diff --git a/internal/collector/vmware/vmware.go b/internal/collector/vmware/vmware.go index 22be6dd8..0bbb4dc2 100644 --- a/internal/collector/vmware/vmware.go +++ b/internal/collector/vmware/vmware.go @@ -230,7 +230,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { // Collect sends the metric values for each metric // to the provided prometheus Metric channel. func (c *Collector) Collect(ch chan<- prometheus.Metric) error { - errs := make([]error, 0, 2) + errs := make([]error, 0) if err := c.collectCpu(ch); err != nil { errs = append(errs, fmt.Errorf("failed collecting vmware cpu metrics: %w", err)) diff --git a/internal/types/errors.go b/internal/types/errors.go index 3694d7cc..54ee6d6a 100644 --- a/internal/types/errors.go +++ b/internal/types/errors.go @@ -18,4 +18,5 @@ import "errors" var ( ErrCollectorNotInitialized = errors.New("collector not initialized") ErrNoData = errors.New("no data") + ErrNoDataUnexpected = errors.New("no data") ) diff --git a/internal/utils/testutils/testutils.go b/internal/utils/testutils/testutils.go index d35f30a0..75546132 100644 --- a/internal/utils/testutils/testutils.go +++ b/internal/utils/testutils/testutils.go @@ -47,7 +47,7 @@ func FuncBenchmarkCollector[C collector.Collector](b *testing.B, name string, co } collectors := collector.New(map[string]collector.Collector{name: c}) - require.NoError(b, collectors.Build(logger)) + require.NoError(b, collectors.Build(b.Context(), logger)) metrics := make(chan prometheus.Metric) diff --git a/pkg/collector/collection.go b/pkg/collector/collection.go index 111a2269..4919c3eb 100644 --- a/pkg/collector/collection.go +++ b/pkg/collector/collection.go @@ -201,7 +201,7 @@ func (c *Collection) Enable(enabledCollectors []string) error { // Build To be called by the exporter for collector initialization. 
// Instead, fail fast, it will try to build all collectors and return all errors. // errors are joined with errors.Join. -func (c *Collection) Build(logger *slog.Logger) error { +func (c *Collection) Build(ctx context.Context, logger *slog.Logger) error { c.startTime = gotime.Now() err := c.initMI() @@ -236,7 +236,7 @@ func (c *Collection) Build(logger *slog.Logger) error { errors.Is(err, pdh.NewPdhError(pdh.CstatusNoObject)) || errors.Is(err, pdh.NewPdhError(pdh.CstatusNoCounter)) || errors.Is(err, mi.MI_RESULT_INVALID_NAMESPACE) { - logger.LogAttrs(context.Background(), slog.LevelWarn, "couldn't initialize collector", slog.Any("err", err)) + logger.LogAttrs(ctx, slog.LevelWarn, "couldn't initialize collector", slog.Any("err", err)) continue } From 50808c73fe44e1bddffe6f74760e5e1d4c384b3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Fri, 4 Apr 2025 20:20:08 +0200 Subject: [PATCH 08/14] logon: deprecate collector. Use `terminal_services` instead (#1957) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke (cherry picked from commit 0846c2805f6c58f89ca7a82e0f0fb7236eac4a5d) --- README.md | 1 - docs/collector.logon.md | 78 ------------------- internal/collector/logon/logon.go | 11 ++- internal/collector/os/os.go | 6 +- .../terminal_services/terminal_services.go | 2 +- tools/e2e-output.txt | 4 +- 6 files changed, 15 insertions(+), 87 deletions(-) delete mode 100644 docs/collector.logon.md diff --git a/README.md b/README.md index 934c625c..2145972c 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,6 @@ Name | Description | Enabled by default [iis](docs/collector.iis.md) | IIS sites and applications | [license](docs/collector.license.md) | Windows license status | [logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓ -[logon](docs/collector.logon.md) | User logon sessions | [memory](docs/collector.memory.md) | Memory usage metrics | ✓ [mscluster](docs/collector.mscluster.md) | MSCluster metrics | [msmq](docs/collector.msmq.md) | MSMQ queues | diff --git a/docs/collector.logon.md b/docs/collector.logon.md deleted file mode 100644 index 93d7088f..00000000 --- a/docs/collector.logon.md +++ /dev/null @@ -1,78 +0,0 @@ -# logon collector - -The logon collector exposes metrics detailing the active user logon sessions. - -| | | -|---------------------|-----------| -| Metric name prefix | `logon` | -| Source | Win32 API | -| Enabled by default? | No | - -## Flags - -None - -## Metrics - -| Name | Description | Type | Labels | -|-------------------------------------------|--------------------------------------------|-------|------------------------------------| -| `windows_logon_session_logon_timestamp_seconds` | timestamp of the logon session in seconds. | gauge | `domain`, `id`, `type`, `username` | - -### Example metric -Query the total number of interactive logon sessions -``` -# HELP windows_logon_session_logon_timestamp_seconds timestamp of the logon session in seconds. 
-# TYPE windows_logon_session_logon_timestamp_seconds gauge -windows_logon_session_logon_timestamp_seconds{domain="",id="0x0:0x8c54",type="System",username=""} 1.72876928e+09 -windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x991a",type="Interactive",username="UMFD-1"} 1.728769282e+09 -windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x9933",type="Interactive",username="UMFD-0"} 1.728769282e+09 -windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x994a",type="Interactive",username="UMFD-0"} 1.728769282e+09 -windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0x999d",type="Interactive",username="UMFD-1"} 1.728769282e+09 -windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf25a",type="Interactive",username="UMFD-2"} 1.728769532e+09 -windows_logon_session_logon_timestamp_seconds{domain="Font Driver Host",id="0x0:0xbf290",type="Interactive",username="UMFD-2"} 1.728769532e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x130241",type="Network",username="vm-jok-dev$"} 1.728769625e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x24f7c9",type="Network",username="vm-jok-dev$"} 1.728770121e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x276846",type="Network",username="vm-jok-dev$"} 1.728770195e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e4",type="Service",username="vm-jok-dev$"} 1.728769283e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x3e7",type="System",username="vm-jok-dev$"} 1.728769279e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x71d0f",type="Network",username="vm-jok-dev$"} 1.728769324e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x720a3",type="Network",username="vm-jok-dev$"} 1.728769324e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x725cb",type="Network",username="vm-jok-dev$"} 1.728769324e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0x753d8",type="Network",username="vm-jok-dev$"} 1.728769325e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xa3913",type="Network",username="vm-jok-dev$"} 1.728769385e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xbe7f2",type="Network",username="jok"} 1.728769531e+09 -windows_logon_session_logon_timestamp_seconds{domain="JKROEPKE",id="0x0:0xc76c4",type="RemoteInteractive",username="jok"} 1.728769533e+09 -windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e3",type="Service",username="IUSR"} 1.728769295e+09 -windows_logon_session_logon_timestamp_seconds{domain="NT AUTHORITY",id="0x0:0x3e5",type="Service",username="LOCAL SERVICE"} 1.728769283e+09 -windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xae4c7",type="Service",username="MSSQLSERVER"} 1.728769425e+09 -windows_logon_session_logon_timestamp_seconds{domain="NT Service",id="0x0:0xb42f1",type="Service",username="SQLTELEMETRY"} 1.728769431e+09 -windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfbac",type="Interactive",username="DWM-2"} 1.728769532e+09 -windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xbfc72",type="Interactive",username="DWM-2"} 1.728769532e+09 
-windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdedd",type="Interactive",username="DWM-1"} 1.728769283e+09 -windows_logon_session_logon_timestamp_seconds{domain="Window Manager",id="0x0:0xdefd",type="Interactive",username="DWM-1"} 1.728769283e+09 -``` - -### Possible values for `type` - -- System -- Interactive -- Network -- Batch -- Service -- Proxy -- Unlock -- NetworkCleartext -- NewCredentials -- RemoteInteractive -- CachedInteractive -- CachedRemoteInteractive -- CachedUnlock - -## Useful queries -Query the total number of local and remote (I.E. Terminal Services) interactive sessions. -``` -count(windows_logon_logon_type{type=~"Interactive|RemoteInteractive"}) by (type) -``` - -## Alerting examples -_This collector doesn’t yet have alerting examples, we would appreciate your help adding them!_ diff --git a/internal/collector/logon/logon.go b/internal/collector/logon/logon.go index 6d91829b..c3955a89 100644 --- a/internal/collector/logon/logon.go +++ b/internal/collector/logon/logon.go @@ -34,6 +34,7 @@ type Config struct{} var ConfigDefaults = Config{} // A Collector is a Prometheus Collector for WMI metrics. +// Deprecated: Use windows_terminal_services_session_info instead. type Collector struct { config Config @@ -64,10 +65,16 @@ func (c *Collector) Close() error { return nil } -func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { +func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { + logger.Warn("The logon collector will be removed mid 2025. "+ + "See https://github.com/prometheus-community/windows_exporter/pull/1957 for more information. If you see values in this collector"+ + " that you need, please open an issue to discuss how to get them into the new collector.", + slog.String("collector", Name), + ) + c.sessionInfo = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "session_logon_timestamp_seconds"), - "timestamp of the logon session in seconds.", + "Deprecated. Use windows_terminal_services_session_info instead.", []string{"id", "username", "domain", "type"}, nil, ) diff --git a/internal/collector/os/os.go b/internal/collector/os/os.go index c7f70e2f..ac052326 100644 --- a/internal/collector/os/os.go +++ b/internal/collector/os/os.go @@ -53,7 +53,7 @@ type Collector struct { processesLimit *prometheus.Desc // users - // Deprecated: Use count(windows_logon_logon_type) instead. + // Deprecated: Use `sum(windows_terminal_services_session_info{state="active"})` instead. users *prometheus.Desc // physicalMemoryFreeBytes @@ -106,7 +106,7 @@ func (c *Collector) Close() error { } func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { - logger.Warn("The os collect holds a number of deprecated metrics and will be removed mid 2025. "+ + logger.Warn("The os collector holds a number of deprecated metrics and will be removed mid 2025. 
"+ "See https://github.com/prometheus-community/windows_exporter/pull/1596 for more information.", slog.String("collector", Name), ) @@ -180,7 +180,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { ) c.users = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "users"), - "Deprecated: Use `count(windows_logon_logon_type)` instead.", + "Deprecated: Use `sum(windows_terminal_services_session_info{state=\"active\"})` instead.", nil, nil, ) diff --git a/internal/collector/terminal_services/terminal_services.go b/internal/collector/terminal_services/terminal_services.go index 6e9bcd08..c25cf81e 100644 --- a/internal/collector/terminal_services/terminal_services.go +++ b/internal/collector/terminal_services/terminal_services.go @@ -437,7 +437,7 @@ func (c *Collector) collectWTSSessions(ch chan<- prometheus.Metric) error { for _, session := range sessions { // only connect metrics for remote named sessions n := strings.ReplaceAll(session.SessionName, "#", " ") - if n == "" || n == "Services" || n == "Console" { + if n == "" || n == "Services" { continue } diff --git a/tools/e2e-output.txt b/tools/e2e-output.txt index a5dcfec0..7b4e6f65 100644 --- a/tools/e2e-output.txt +++ b/tools/e2e-output.txt @@ -193,7 +193,7 @@ windows_exporter_collector_timeout{collector="udp"} 0 # TYPE windows_logical_disk_write_seconds_total counter # HELP windows_logical_disk_writes_total The number of write operations on the disk (LogicalDisk.DiskWritesPerSec) # TYPE windows_logical_disk_writes_total counter -# HELP windows_logon_session_logon_timestamp_seconds timestamp of the logon session in seconds. +# HELP windows_logon_session_logon_timestamp_seconds Deprecated. Use windows_terminal_services_session_info instead. # TYPE windows_logon_session_logon_timestamp_seconds gauge # HELP windows_memory_available_bytes The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to the standby (cached), free and zero page lists (AvailableBytes) # TYPE windows_memory_available_bytes gauge @@ -307,7 +307,7 @@ windows_exporter_collector_timeout{collector="udp"} 0 # TYPE windows_os_time gauge # HELP windows_os_timezone Deprecated: Use windows_time_timezone instead. # TYPE windows_os_timezone gauge -# HELP windows_os_users Deprecated: Use `count(windows_logon_logon_type)` instead. +# HELP windows_os_users Deprecated: Use `sum(windows_terminal_services_session_info{state="active"})` instead. # TYPE windows_os_users gauge # HELP windows_os_virtual_memory_bytes Deprecated: Use `windows_memory_commit_limit` instead. 
# TYPE windows_os_virtual_memory_bytes gauge From 759faee1c359e0d060bf4eb3536874d128bb572b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Mon, 31 Mar 2025 22:20:26 +0200 Subject: [PATCH 09/14] mssql: support initial non default instances names (#1958) (cherry picked from commit fa8af098c8b004544b9d007e4042a3c2a965e51d) --- internal/collector/mssql/mssql.go | 20 ++-- .../collector/mssql/mssql_access_methods.go | 96 +++++++-------- .../mssql/mssql_availability_replica.go | 26 ++-- .../collector/mssql/mssql_buffer_manager.go | 54 ++++----- internal/collector/mssql/mssql_database.go | 112 +++++++++--------- .../collector/mssql/mssql_database_replica.go | 56 ++++----- .../mssql/mssql_general_statistics.go | 56 ++++----- internal/collector/mssql/mssql_locks.go | 24 ++-- .../collector/mssql/mssql_memory_manager.go | 48 ++++---- internal/collector/mssql/mssql_sql_errors.go | 10 +- internal/collector/mssql/mssql_sql_stats.go | 30 ++--- .../collector/mssql/mssql_transactions.go | 34 +++--- internal/collector/mssql/mssql_wait_stats.go | 32 ++--- internal/collector/mssql/types.go | 20 ++-- 14 files changed, 310 insertions(+), 308 deletions(-) diff --git a/internal/collector/mssql/mssql.go b/internal/collector/mssql/mssql.go index 1ff8013c..92c7b927 100644 --- a/internal/collector/mssql/mssql.go +++ b/internal/collector/mssql/mssql.go @@ -333,7 +333,7 @@ func (c *Collector) getMSSQLInstances() ([]mssqlInstance, error) { return nil, fmt.Errorf("couldn't get instance info: %w", err) } - instance, err := newMssqlInstance(instanceVersion) + instance, err := newMssqlInstance(instanceName, instanceVersion) if err != nil { return nil, err } @@ -348,14 +348,14 @@ func (c *Collector) getMSSQLInstances() ([]mssqlInstance, error) { // mssqlGetPerfObjectName returns the name of the Windows Performance // Counter object for the given SQL instance and Collector. 
-func (c *Collector) mssqlGetPerfObjectName(sqlInstance string, collector string) string { +func (c *Collector) mssqlGetPerfObjectName(sqlInstance mssqlInstance, collector string) string { sb := strings.Builder{} - if sqlInstance == "MSSQLSERVER" { + if sqlInstance.isFirstInstance { sb.WriteString("SQLServer:") } else { sb.WriteString("MSSQL$") - sb.WriteString(sqlInstance) + sb.WriteString(sqlInstance.name) sb.WriteString(":") } @@ -369,8 +369,8 @@ func (c *Collector) mssqlGetPerfObjectName(sqlInstance string, collector string) func (c *Collector) collect( ch chan<- prometheus.Metric, collector string, - perfDataCollectors map[string]*pdh.Collector, - collectFn func(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error, + perfDataCollectors map[mssqlInstance]*pdh.Collector, + collectFn func(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error, ) error { errs := make([]error, 0, len(perfDataCollectors)) @@ -386,11 +386,11 @@ func (c *Collector) collect( errs = append(errs, err) success = 0.0 - c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance, duration), + c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance.name, duration), slog.Any("err", err), ) } else { - c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance, duration)) + c.logger.LogAttrs(ctx, slog.LevelDebug, fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s", collector, sqlInstance.name, duration)) } if collector == "" { @@ -401,13 +401,13 @@ func (c *Collector) collect( c.mssqlScrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), - collector, sqlInstance, + collector, sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.mssqlScrapeSuccessDesc, prometheus.GaugeValue, success, - collector, sqlInstance, + collector, sqlInstance.name, ) } diff --git a/internal/collector/mssql/mssql_access_methods.go b/internal/collector/mssql/mssql_access_methods.go index 0d9a7b68..c90a526c 100644 --- a/internal/collector/mssql/mssql_access_methods.go +++ b/internal/collector/mssql/mssql_access_methods.go @@ -25,7 +25,7 @@ import ( ) type collectorAccessMethods struct { - accessMethodsPerfDataCollectors map[string]*pdh.Collector + accessMethodsPerfDataCollectors map[mssqlInstance]*pdh.Collector accessMethodsPerfDataObject []perfDataCounterValuesAccessMethods accessMethodsAUcleanupbatches *prometheus.Desc @@ -124,11 +124,11 @@ type perfDataCounterValuesAccessMethods struct { func (c *Collector) buildAccessMethods() error { var err error - c.accessMethodsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.accessMethodsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.accessMethodsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Access Methods"), nil) + c.accessMethodsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Access Methods"), nil) if err != nil { errs = append(errs, fmt.Errorf("failed to create AccessMethods collector 
for instance %s: %w", sqlInstance.name, err)) } @@ -407,7 +407,7 @@ func (c *Collector) collectAccessMethods(ch chan<- prometheus.Metric) error { return c.collect(ch, subCollectorAccessMethods, c.accessMethodsPerfDataCollectors, c.collectAccessMethodsInstance) } -func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.accessMethodsPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"), err) @@ -417,308 +417,308 @@ func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sq c.accessMethodsAUcleanupbatches, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupbatchesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsAUcleanups, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsAUCleanupsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsByReferenceLobCreateCount, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobCreateCount, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsByReferenceLobUseCount, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsByReferenceLobUseCount, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsCountLobReadahead, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsCountLobReadahead, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsCountPullInRow, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsCountPullInRow, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsCountPushOffRow, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsCountPushOffRow, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsDeferreddroppedAUs, prometheus.GaugeValue, c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedAUs, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsDeferredDroppedrowsets, prometheus.GaugeValue, c.accessMethodsPerfDataObject[0].AccessMethodsDeferredDroppedRowsets, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsDroppedrowsetcleanups, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetCleanupsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsDroppedrowsetsskipped, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsDroppedRowsetsSkippedPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsExtentDeallocations, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsExtentDeallocationsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsExtentsAllocated, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsExtentsAllocatedPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsFailedAUcleanupbatches, prometheus.CounterValue, 
c.accessMethodsPerfDataObject[0].AccessMethodsFailedAUCleanupBatchesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsFailedleafpagecookie, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsFailedLeafPageCookie, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsFailedtreepagecookie, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsFailedTreePageCookie, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsForwardedRecords, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsForwardedRecordsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsFreeSpacePageFetches, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpacePageFetchesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsFreeSpaceScans, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsFreeSpaceScansPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsFullScans, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsFullScansPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsIndexSearches, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsIndexSearchesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsInSysXactwaits, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsInSysXactWaitsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsLobHandleCreateCount, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleCreateCount, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsLobHandleDestroyCount, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsLobHandleDestroyCount, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsLobSSProviderCreateCount, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderCreateCount, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsLobSSProviderDestroyCount, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderDestroyCount, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsLobSSProviderTruncationCount, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsLobSSProviderTruncationCount, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsMixedPageAllocations, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsMixedPageAllocationsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsPageCompressionAttempts, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsPageCompressionAttemptsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsPageDeallocations, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsPageDeallocationsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsPagesAllocated, prometheus.CounterValue, 
c.accessMethodsPerfDataObject[0].AccessMethodsPagesAllocatedPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsPagesCompressed, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsPagesCompressedPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsPageSplits, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsPageSplitsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsProbeScans, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsProbeScansPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsRangeScans, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsRangeScansPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsScanPointRevalidations, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsScanPointRevalidationsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsSkippedGhostedRecords, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsSkippedGhostedRecordsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsTableLockEscalations, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsTableLockEscalationsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsUsedleafpagecookie, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsUsedLeafPageCookie, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsUsedtreepagecookie, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsUsedTreePageCookie, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsWorkfilesCreated, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsWorkfilesCreatedPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsWorktablesCreated, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesCreatedPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsWorktablesFromCacheHits, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatio, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.accessMethodsWorktablesFromCacheLookups, prometheus.CounterValue, c.accessMethodsPerfDataObject[0].AccessMethodsWorktablesFromCacheRatioBase, - sqlInstance, + sqlInstance.name, ) return nil diff --git a/internal/collector/mssql/mssql_availability_replica.go b/internal/collector/mssql/mssql_availability_replica.go index 3451625a..17546657 100644 --- a/internal/collector/mssql/mssql_availability_replica.go +++ b/internal/collector/mssql/mssql_availability_replica.go @@ -26,7 +26,7 @@ import ( ) type collectorAvailabilityReplica struct { - availabilityReplicaPerfDataCollectors map[string]*pdh.Collector + availabilityReplicaPerfDataCollectors map[mssqlInstance]*pdh.Collector availabilityReplicaPerfDataObject []perfDataCounterValuesAvailabilityReplica availReplicaBytesReceivedFromReplica *prometheus.Desc @@ -57,11 +57,11 @@ type perfDataCounterValuesAvailabilityReplica struct { func (c *Collector) buildAvailabilityReplica() error { var err error - 
c.availabilityReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.availabilityReplicaPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.availabilityReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Availability Replica"), pdh.InstancesAll) + c.availabilityReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), pdh.InstancesAll) if err != nil { errs = append(errs, fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance.name, err)) } @@ -130,7 +130,7 @@ func (c *Collector) collectAvailabilityReplica(ch chan<- prometheus.Metric) erro return c.collect(ch, subCollectorAvailabilityReplica, c.availabilityReplicaPerfDataCollectors, c.collectAvailabilityReplicaInstance) } -func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.availabilityReplicaPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), err) @@ -141,63 +141,63 @@ func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metr c.availReplicaBytesReceivedFromReplica, prometheus.CounterValue, data.AvailReplicaBytesReceivedFromReplicaPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.availReplicaBytesSentToReplica, prometheus.CounterValue, data.AvailReplicaBytesSentToReplicaPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.availReplicaBytesSentToTransport, prometheus.CounterValue, data.AvailReplicaBytesSentToTransportPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.availReplicaFlowControl, prometheus.CounterValue, data.AvailReplicaFlowControlPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.availReplicaFlowControlTimeMS, prometheus.CounterValue, utils.MilliSecToSec(data.AvailReplicaFlowControlTimeMSPerSec), - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.availReplicaReceivesFromReplica, prometheus.CounterValue, data.AvailReplicaReceivesFromReplicaPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.availReplicaResentMessages, prometheus.CounterValue, data.AvailReplicaResentMessagesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.availReplicaSendsToReplica, prometheus.CounterValue, data.AvailReplicaSendsToReplicaPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.availReplicaSendsToTransport, prometheus.CounterValue, data.AvailReplicaSendsToTransportPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) } diff --git 
a/internal/collector/mssql/mssql_buffer_manager.go b/internal/collector/mssql/mssql_buffer_manager.go index e378738c..2c040d9f 100644 --- a/internal/collector/mssql/mssql_buffer_manager.go +++ b/internal/collector/mssql/mssql_buffer_manager.go @@ -25,7 +25,7 @@ import ( ) type collectorBufferManager struct { - bufManPerfDataCollectors map[string]*pdh.Collector + bufManPerfDataCollectors map[mssqlInstance]*pdh.Collector bufManPerfDataObject []perfDataCounterValuesBufMan bufManBackgroundwriterpages *prometheus.Desc @@ -82,11 +82,11 @@ type perfDataCounterValuesBufMan struct { func (c *Collector) buildBufferManager() error { var err error - c.bufManPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.bufManPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.bufManPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesBufMan](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Buffer Manager"), nil) + c.bufManPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesBufMan](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), nil) if err != nil { errs = append(errs, fmt.Errorf("failed to create Buffer Manager collector for instance %s: %w", sqlInstance.name, err)) } @@ -238,7 +238,7 @@ func (c *Collector) collectBufferManager(ch chan<- prometheus.Metric) error { return c.collect(ch, subCollectorBufferManager, c.bufManPerfDataCollectors, c.collectBufferManagerInstance) } -func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.bufManPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), err) @@ -249,161 +249,161 @@ func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sq c.bufManBackgroundwriterpages, prometheus.CounterValue, data.BufManBackgroundWriterPagesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManBuffercachehits, prometheus.GaugeValue, data.BufManBufferCacheHitRatio, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManBuffercachelookups, prometheus.GaugeValue, data.BufManBufferCacheHitRatioBase, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManCheckpointpages, prometheus.CounterValue, data.BufManCheckpointPagesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManDatabasepages, prometheus.GaugeValue, data.BufManDatabasePages, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManExtensionallocatedpages, prometheus.GaugeValue, data.BufManExtensionAllocatedPages, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManExtensionfreepages, prometheus.GaugeValue, data.BufManExtensionFreePages, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManExtensioninuseaspercentage, prometheus.GaugeValue, data.BufManExtensionInUseAsPercentage, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManExtensionoutstandingIOcounter, 
prometheus.GaugeValue, data.BufManExtensionOutstandingIOCounter, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManExtensionpageevictions, prometheus.CounterValue, data.BufManExtensionPageEvictionsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManExtensionpagereads, prometheus.CounterValue, data.BufManExtensionPageReadsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManExtensionpageunreferencedtime, prometheus.GaugeValue, data.BufManExtensionPageUnreferencedTime, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManExtensionpagewrites, prometheus.CounterValue, data.BufManExtensionPageWritesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManFreeliststalls, prometheus.CounterValue, data.BufManFreeListStallsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManIntegralControllerSlope, prometheus.GaugeValue, data.BufManIntegralControllerSlope, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManLazywrites, prometheus.CounterValue, data.BufManLazyWritesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManPagelifeexpectancy, prometheus.GaugeValue, data.BufManPageLifeExpectancy, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManPagelookups, prometheus.CounterValue, data.BufManPageLookupsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManPagereads, prometheus.CounterValue, data.BufManPageReadsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManPagewrites, prometheus.CounterValue, data.BufManPageWritesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManReadaheadpages, prometheus.CounterValue, data.BufManReadaheadPagesPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManReadaheadtime, prometheus.CounterValue, data.BufManReadaheadTimePerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.bufManTargetpages, prometheus.GaugeValue, data.BufManTargetPages, - sqlInstance, + sqlInstance.name, ) } diff --git a/internal/collector/mssql/mssql_database.go b/internal/collector/mssql/mssql_database.go index a1cda523..eaf05267 100644 --- a/internal/collector/mssql/mssql_database.go +++ b/internal/collector/mssql/mssql_database.go @@ -25,8 +25,8 @@ import ( ) type collectorDatabases struct { - databasesPerfDataCollectors map[string]*pdh.Collector - databasesPerfDataCollectors2019 map[string]*pdh.Collector + databasesPerfDataCollectors map[mssqlInstance]*pdh.Collector + databasesPerfDataCollectors2019 map[mssqlInstance]*pdh.Collector databasesPerfDataObject []perfDataCounterValuesDatabases databasesPerfDataObject2019 []perfDataCounterValuesDatabases2019 @@ -141,18 +141,18 @@ type perfDataCounterValuesDatabases2019 struct { func (c *Collector) buildDatabases() error { var err error - c.databasesPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) - c.databasesPerfDataCollectors2019 = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.databasesPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) + c.databasesPerfDataCollectors2019 = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range 
c.mssqlInstances { - c.databasesPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll) + c.databasesPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll) if err != nil { errs = append(errs, fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance.name, err)) } if sqlInstance.isVersionGreaterOrEqualThan(serverVersion2019) { - c.databasesPerfDataCollectors2019[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Databases"), pdh.InstancesAll) + c.databasesPerfDataCollectors2019[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll) if err != nil { errs = append(errs, fmt.Errorf("failed to create Databases 2019 collector for instance %s: %w", sqlInstance.name, err)) } @@ -458,7 +458,7 @@ func (c *Collector) collectDatabases(ch chan<- prometheus.Metric) error { ) } -func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.databasesPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err) @@ -469,336 +469,336 @@ func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlIns c.databasesActiveTransactions, prometheus.GaugeValue, data.DatabasesActiveTransactions, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesBackupPerRestoreThroughput, prometheus.CounterValue, data.DatabasesBackupPerRestoreThroughputPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesBulkCopyRows, prometheus.CounterValue, data.DatabasesBulkCopyRowsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesBulkCopyThroughput, prometheus.CounterValue, data.DatabasesBulkCopyThroughputPerSec*1024, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesCommitTableEntries, prometheus.GaugeValue, data.DatabasesCommitTableEntries, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesDataFilesSizeKB, prometheus.GaugeValue, data.DatabasesDataFilesSizeKB*1024, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesDBCCLogicalScanBytes, prometheus.CounterValue, data.DatabasesDBCCLogicalScanBytesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesGroupCommitTime, prometheus.CounterValue, data.DatabasesGroupCommitTimePerSec/1000000.0, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogBytesFlushed, prometheus.CounterValue, data.DatabasesLogBytesFlushedPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( 
c.databasesLogCacheHits, prometheus.GaugeValue, data.DatabasesLogCacheHitRatio, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogCacheLookups, prometheus.GaugeValue, data.DatabasesLogCacheHitRatioBase, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogCacheReads, prometheus.CounterValue, data.DatabasesLogCacheReadsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogFilesSizeKB, prometheus.GaugeValue, data.DatabasesLogFilesSizeKB*1024, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogFilesUsedSizeKB, prometheus.GaugeValue, data.DatabasesLogFilesUsedSizeKB*1024, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogFlushes, prometheus.CounterValue, data.DatabasesLogFlushesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogFlushWaits, prometheus.CounterValue, data.DatabasesLogFlushWaitsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogFlushWaitTime, prometheus.GaugeValue, data.DatabasesLogFlushWaitTime/1000.0, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogFlushWriteTimeMS, prometheus.GaugeValue, data.DatabasesLogFlushWriteTimeMS/1000.0, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogGrowths, prometheus.GaugeValue, data.DatabasesLogGrowths, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolCacheMisses, prometheus.CounterValue, data.DatabasesLogPoolCacheMissesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolDiskReads, prometheus.CounterValue, data.DatabasesLogPoolDiskReadsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolHashDeletes, prometheus.CounterValue, data.DatabasesLogPoolHashDeletesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolHashInserts, prometheus.CounterValue, data.DatabasesLogPoolHashInsertsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolInvalidHashEntry, prometheus.CounterValue, data.DatabasesLogPoolInvalidHashEntryPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolLogScanPushes, prometheus.CounterValue, data.DatabasesLogPoolLogScanPushesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolLogWriterPushes, prometheus.CounterValue, data.DatabasesLogPoolLogWriterPushesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolPushEmptyFreePool, prometheus.CounterValue, data.DatabasesLogPoolPushEmptyFreePoolPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolPushLowMemory, prometheus.CounterValue, data.DatabasesLogPoolPushLowMemoryPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- 
prometheus.MustNewConstMetric( c.databasesLogPoolPushNoFreeBuffer, prometheus.CounterValue, data.DatabasesLogPoolPushNoFreeBufferPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolReqBehindTrunc, prometheus.CounterValue, data.DatabasesLogPoolReqBehindTruncPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolRequestsOldVLF, prometheus.CounterValue, data.DatabasesLogPoolRequestsOldVLFPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolRequests, prometheus.CounterValue, data.DatabasesLogPoolRequestsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolTotalActiveLogSize, prometheus.GaugeValue, data.DatabasesLogPoolTotalActiveLogSize, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogPoolTotalSharedPoolSize, prometheus.GaugeValue, data.DatabasesLogPoolTotalSharedPoolSize, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogShrinks, prometheus.GaugeValue, data.DatabasesLogShrinks, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesLogTruncations, prometheus.GaugeValue, data.DatabasesLogTruncations, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesPercentLogUsed, prometheus.GaugeValue, data.DatabasesPercentLogUsed, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesReplPendingXacts, prometheus.GaugeValue, data.DatabasesReplPendingXacts, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesReplTransRate, prometheus.CounterValue, data.DatabasesReplTransRate, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesShrinkDataMovementBytes, prometheus.CounterValue, data.DatabasesShrinkDataMovementBytesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesTrackedTransactions, prometheus.CounterValue, data.DatabasesTrackedTransactionsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesTransactions, prometheus.CounterValue, data.DatabasesTransactionsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesWriteTransactions, prometheus.CounterValue, data.DatabasesWriteTransactionsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesXTPControllerDLCLatencyPerFetch, prometheus.GaugeValue, data.DatabasesXTPControllerDLCLatencyPerFetch, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesXTPControllerDLCPeakLatency, prometheus.GaugeValue, data.DatabasesXTPControllerDLCPeakLatency*1000000.0, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesXTPControllerLogProcessed, prometheus.CounterValue, data.DatabasesXTPControllerLogProcessedPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.databasesXTPMemoryUsedKB, prometheus.GaugeValue, 
data.DatabasesXTPMemoryUsedKB*1024, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) } return nil } -func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.databasesPerfDataObject2019) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err) @@ -809,7 +809,7 @@ func (c *Collector) collectDatabasesInstance2019(ch chan<- prometheus.Metric, sq c.databasesActiveParallelRedoThreads, prometheus.GaugeValue, data.DatabasesActiveParallelRedoThreads, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) } diff --git a/internal/collector/mssql/mssql_database_replica.go b/internal/collector/mssql/mssql_database_replica.go index c91a53cc..3d5d0f90 100644 --- a/internal/collector/mssql/mssql_database_replica.go +++ b/internal/collector/mssql/mssql_database_replica.go @@ -25,7 +25,7 @@ import ( ) type collectorDatabaseReplica struct { - dbReplicaPerfDataCollectors map[string]*pdh.Collector + dbReplicaPerfDataCollectors map[mssqlInstance]*pdh.Collector dbReplicaPerfDataObject []perfDataCounterValuesDBReplica dbReplicaDatabaseFlowControlDelay *prometheus.Desc @@ -86,11 +86,11 @@ type perfDataCounterValuesDBReplica struct { func (c *Collector) buildDatabaseReplica() error { var err error - c.dbReplicaPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.dbReplicaPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.dbReplicaPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesDBReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Database Replica"), pdh.InstancesAll) + c.dbReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDBReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), pdh.InstancesAll) if err != nil { errs = append(errs, fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance.name, err)) } @@ -249,7 +249,7 @@ func (c *Collector) collectDatabaseReplica(ch chan<- prometheus.Metric) error { return c.collect(ch, subCollectorDatabaseReplica, c.dbReplicaPerfDataCollectors, c.collectDatabaseReplicaInstance) } -func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.dbReplicaPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), err) @@ -260,168 +260,168 @@ func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, c.dbReplicaDatabaseFlowControlDelay, prometheus.GaugeValue, data.DbReplicaDatabaseFlowControlDelay, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaDatabaseFlowControls, prometheus.CounterValue, data.DbReplicaDatabaseFlowControlsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- 
prometheus.MustNewConstMetric( c.dbReplicaFileBytesReceived, prometheus.CounterValue, data.DbReplicaFileBytesReceivedPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaGroupCommits, prometheus.CounterValue, data.DbReplicaGroupCommitsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaGroupCommitTime, prometheus.GaugeValue, data.DbReplicaGroupCommitTime, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogApplyPendingQueue, prometheus.GaugeValue, data.DbReplicaLogApplyPendingQueue, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogApplyReadyQueue, prometheus.GaugeValue, data.DbReplicaLogApplyReadyQueue, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogBytesCompressed, prometheus.CounterValue, data.DbReplicaLogBytesCompressedPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogBytesDecompressed, prometheus.CounterValue, data.DbReplicaLogBytesDecompressedPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogBytesReceived, prometheus.CounterValue, data.DbReplicaLogBytesReceivedPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogCompressionCachehits, prometheus.CounterValue, data.DbReplicaLogCompressionCacheHitsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogCompressionCachemisses, prometheus.CounterValue, data.DbReplicaLogCompressionCacheMissesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogCompressions, prometheus.CounterValue, data.DbReplicaLogCompressionsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogDecompressions, prometheus.CounterValue, data.DbReplicaLogDecompressionsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogremainingforundo, prometheus.GaugeValue, data.DbReplicaLogRemainingForUndo, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaLogSendQueue, prometheus.GaugeValue, data.DbReplicaLogSendQueue, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaMirroredWritetransactions, prometheus.CounterValue, data.DbReplicaMirroredWriteTransactionsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaRecoveryQueue, prometheus.GaugeValue, data.DbReplicaRecoveryQueue, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaRedoblocked, prometheus.CounterValue, data.DbReplicaRedoBlockedPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaRedoBytesRemaining, prometheus.GaugeValue, data.DbReplicaRedoBytesRemaining, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaRedoneBytes, prometheus.CounterValue, data.DbReplicaRedoneBytesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch 
<- prometheus.MustNewConstMetric( c.dbReplicaRedones, prometheus.CounterValue, data.DbReplicaRedonesPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaTotalLogrequiringundo, prometheus.GaugeValue, data.DbReplicaTotalLogRequiringUndo, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.dbReplicaTransactionDelay, prometheus.GaugeValue, data.DbReplicaTransactionDelay/1000.0, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) } diff --git a/internal/collector/mssql/mssql_general_statistics.go b/internal/collector/mssql/mssql_general_statistics.go index 3ed9b6c3..96229923 100644 --- a/internal/collector/mssql/mssql_general_statistics.go +++ b/internal/collector/mssql/mssql_general_statistics.go @@ -25,7 +25,7 @@ import ( ) type collectorGeneralStatistics struct { - genStatsPerfDataCollectors map[string]*pdh.Collector + genStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector genStatsPerfDataObject []perfDataCounterValuesGenStats genStatsActiveTempTables *prometheus.Desc @@ -84,11 +84,11 @@ type perfDataCounterValuesGenStats struct { func (c *Collector) buildGeneralStatistics() error { var err error - c.genStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.genStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.genStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesGenStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "General Statistics"), nil) + c.genStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesGenStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), nil) if err != nil { errs = append(errs, fmt.Errorf("failed to create General Statistics collector for instance %s: %w", sqlInstance.name, err)) } @@ -247,7 +247,7 @@ func (c *Collector) collectGeneralStatistics(ch chan<- prometheus.Metric) error return c.collect(ch, subCollectorGeneralStatistics, c.genStatsPerfDataCollectors, c.collectGeneralStatisticsInstance) } -func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.genStatsPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), err) @@ -257,168 +257,168 @@ func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric c.genStatsActiveTempTables, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsActiveTempTables, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsConnectionReset, prometheus.CounterValue, c.genStatsPerfDataObject[0].GenStatsConnectionResetPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsEventNotificationsDelayedDrop, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsEventNotificationsDelayedDrop, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsHTTPAuthenticatedRequests, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsHTTPAuthenticatedRequests, - 
sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsLogicalConnections, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsLogicalConnections, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsLogins, prometheus.CounterValue, c.genStatsPerfDataObject[0].GenStatsLoginsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsLogouts, prometheus.CounterValue, c.genStatsPerfDataObject[0].GenStatsLogoutsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsMarsDeadlocks, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsMarsDeadlocks, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsNonAtomicYieldRate, prometheus.CounterValue, c.genStatsPerfDataObject[0].GenStatsNonatomicYieldRate, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsProcessesBlocked, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsProcessesBlocked, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsSOAPEmptyRequests, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsSOAPEmptyRequests, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsSOAPMethodInvocations, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsSOAPMethodInvocations, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsSOAPSessionInitiateRequests, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsSOAPSessionInitiateRequests, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsSOAPSessionTerminateRequests, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsSOAPSessionTerminateRequests, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsSOAPSQLRequests, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsSOAPSQLRequests, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsSOAPWSDLRequests, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsSOAPWSDLRequests, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsSQLTraceIOProviderLockWaits, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsSQLTraceIOProviderLockWaits, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsTempDBRecoveryUnitID, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsTempdbRecoveryUnitID, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsTempDBrowSetID, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsTempdbRowsetID, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsTempTablesCreationRate, prometheus.CounterValue, c.genStatsPerfDataObject[0].GenStatsTempTablesCreationRate, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsTempTablesForDestruction, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsTempTablesForDestruction, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsTraceEventNotificationQueue, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsTraceEventNotificationQueue, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.genStatsTransactions, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsTransactions, - sqlInstance, + sqlInstance.name, ) ch <- 
prometheus.MustNewConstMetric( c.genStatsUserConnections, prometheus.GaugeValue, c.genStatsPerfDataObject[0].GenStatsUserConnections, - sqlInstance, + sqlInstance.name, ) return nil diff --git a/internal/collector/mssql/mssql_locks.go b/internal/collector/mssql/mssql_locks.go index 8b06377b..5d31bb16 100644 --- a/internal/collector/mssql/mssql_locks.go +++ b/internal/collector/mssql/mssql_locks.go @@ -25,7 +25,7 @@ import ( ) type collectorLocks struct { - locksPerfDataCollectors map[string]*pdh.Collector + locksPerfDataCollectors map[mssqlInstance]*pdh.Collector locksPerfDataObject []perfDataCounterValuesLocks // Win32_PerfRawData_{instance}_SQLServerLocks @@ -55,11 +55,11 @@ type perfDataCounterValuesLocks struct { func (c *Collector) buildLocks() error { var err error - c.locksPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.locksPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.locksPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesLocks](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Locks"), pdh.InstancesAll) + c.locksPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesLocks](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Locks"), pdh.InstancesAll) if err != nil { errs = append(errs, fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance.name, err)) } @@ -121,7 +121,7 @@ func (c *Collector) collectLocks(ch chan<- prometheus.Metric) error { return c.collect(ch, subCollectorLocks, c.locksPerfDataCollectors, c.collectLocksInstance) } -func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.locksPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Locks"), err) @@ -132,56 +132,56 @@ func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstanc c.locksWaitTime, prometheus.GaugeValue, data.LocksAverageWaitTimeMS/1000.0, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.locksCount, prometheus.GaugeValue, data.LocksAverageWaitTimeMSBase/1000.0, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.locksLockRequests, prometheus.CounterValue, data.LocksLockRequestsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.locksLockTimeouts, prometheus.CounterValue, data.LocksLockTimeoutsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.locksLockTimeoutstimeout0, prometheus.CounterValue, data.LocksLockTimeoutsTimeout0PerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.locksLockWaits, prometheus.CounterValue, data.LocksLockWaitsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.locksLockWaitTimeMS, prometheus.GaugeValue, data.LocksLockWaitTimeMS/1000.0, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.locksNumberOfDeadlocks, 
prometheus.CounterValue, data.LocksNumberOfDeadlocksPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) } diff --git a/internal/collector/mssql/mssql_memory_manager.go b/internal/collector/mssql/mssql_memory_manager.go index 6f1cf2ac..3573a9d5 100644 --- a/internal/collector/mssql/mssql_memory_manager.go +++ b/internal/collector/mssql/mssql_memory_manager.go @@ -25,7 +25,7 @@ import ( ) type collectorMemoryManager struct { - memMgrPerfDataCollectors map[string]*pdh.Collector + memMgrPerfDataCollectors map[mssqlInstance]*pdh.Collector memMgrPerfDataObject []perfDataCounterValuesMemMgr memMgrConnectionMemoryKB *prometheus.Desc @@ -76,11 +76,11 @@ type perfDataCounterValuesMemMgr struct { func (c *Collector) buildMemoryManager() error { var err error - c.memMgrPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.memMgrPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.memMgrPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesMemMgr](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Memory Manager"), pdh.InstancesAll) + c.memMgrPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesMemMgr](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), pdh.InstancesAll) if err != nil { errs = append(errs, fmt.Errorf("failed to create Memory Manager collector for instance %s: %w", sqlInstance.name, err)) } @@ -214,7 +214,7 @@ func (c *Collector) collectMemoryManager(ch chan<- prometheus.Metric) error { return c.collect(ch, subCollectorMemoryManager, c.memMgrPerfDataCollectors, c.collectMemoryManagerInstance) } -func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.memMgrPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), err) @@ -224,140 +224,140 @@ func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sq c.memMgrConnectionMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrConnectionMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrDatabaseCacheMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrDatabaseCacheMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrExternalBenefitOfMemory, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrExternalBenefitOfMemory, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrFreeMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrFreeMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrGrantedWorkspaceMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrGrantedWorkspaceMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrLockBlocks, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrLockBlocks, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrLockBlocksAllocated, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrLockBlocksAllocated, 
- sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrLockMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrLockMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrLockOwnerBlocks, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocks, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrLockOwnerBlocksAllocated, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrLockOwnerBlocksAllocated, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrLogPoolMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrLogPoolMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrMaximumWorkspaceMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrMaximumWorkspaceMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrMemoryGrantsOutstanding, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrMemoryGrantsOutstanding, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrMemoryGrantsPending, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrMemoryGrantsPending, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrOptimizerMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrOptimizerMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrReservedServerMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrReservedServerMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrSQLCacheMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrSQLCacheMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrStolenServerMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrStolenServerMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrTargetServerMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrTargetServerMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.memMgrTotalServerMemoryKB, prometheus.GaugeValue, c.memMgrPerfDataObject[0].MemMgrTotalServerMemoryKB*1024, - sqlInstance, + sqlInstance.name, ) return nil diff --git a/internal/collector/mssql/mssql_sql_errors.go b/internal/collector/mssql/mssql_sql_errors.go index 70c66af3..2dd66790 100644 --- a/internal/collector/mssql/mssql_sql_errors.go +++ b/internal/collector/mssql/mssql_sql_errors.go @@ -25,7 +25,7 @@ import ( ) type collectorSQLErrors struct { - sqlErrorsPerfDataCollectors map[string]*pdh.Collector + sqlErrorsPerfDataCollectors map[mssqlInstance]*pdh.Collector sqlErrorsPerfDataObject []perfDataCounterValuesSqlErrors // Win32_PerfRawData_{instance}_SQLServerSQLErrors @@ -41,11 +41,11 @@ type perfDataCounterValuesSqlErrors struct { func (c *Collector) buildSQLErrors() error { var err error - c.sqlErrorsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.sqlErrorsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.sqlErrorsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Errors"), pdh.InstancesAll) + 
c.sqlErrorsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), pdh.InstancesAll) if err != nil { errs = append(errs, fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance.name, err)) } @@ -66,7 +66,7 @@ func (c *Collector) collectSQLErrors(ch chan<- prometheus.Metric) error { return c.collect(ch, subCollectorSQLErrors, c.sqlErrorsPerfDataCollectors, c.collectSQLErrorsInstance) } -func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.sqlErrorsPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), err) @@ -77,7 +77,7 @@ func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlIns c.sqlErrorsTotal, prometheus.CounterValue, data.SqlErrorsErrorsPerSec, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) } diff --git a/internal/collector/mssql/mssql_sql_stats.go b/internal/collector/mssql/mssql_sql_stats.go index 722fec17..6dd1152c 100644 --- a/internal/collector/mssql/mssql_sql_stats.go +++ b/internal/collector/mssql/mssql_sql_stats.go @@ -25,7 +25,7 @@ import ( ) type collectorSQLStats struct { - sqlStatsPerfDataCollectors map[string]*pdh.Collector + sqlStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector sqlStatsPerfDataObject []perfDataCounterValuesSqlStats sqlStatsAutoParamAttempts *prometheus.Desc @@ -58,11 +58,11 @@ type perfDataCounterValuesSqlStats struct { func (c *Collector) buildSQLStats() error { var err error - c.sqlStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.sqlStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.sqlStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesSqlStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "SQL Statistics"), nil) + c.sqlStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), nil) if err != nil { errs = append(errs, fmt.Errorf("failed to create SQL Statistics collector for instance %s: %w", sqlInstance.name, err)) } @@ -142,7 +142,7 @@ func (c *Collector) collectSQLStats(ch chan<- prometheus.Metric) error { return c.collect(ch, subCollectorSQLStats, c.sqlStatsPerfDataCollectors, c.collectSQLStatsInstance) } -func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.sqlStatsPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), err) @@ -152,77 +152,77 @@ func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInst c.sqlStatsAutoParamAttempts, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsAutoParamAttemptsPerSec, - sqlInstance, + 
sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsBatchRequests, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsBatchRequestsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsFailedAutoParams, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsFailedAutoParamsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsForcedParameterizations, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsForcedParameterizationsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsGuidedplanexecutions, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsGuidedplanexecutionsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsMisguidedplanexecutions, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsMisguidedplanexecutionsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsSafeAutoParams, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsSafeAutoParamsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsSQLAttentionrate, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsSQLAttentionrate, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsSQLCompilations, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsSQLCompilationsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsSQLReCompilations, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsSQLReCompilationsPerSec, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.sqlStatsUnsafeAutoParams, prometheus.CounterValue, c.sqlStatsPerfDataObject[0].SqlStatsUnsafeAutoParamsPerSec, - sqlInstance, + sqlInstance.name, ) return nil diff --git a/internal/collector/mssql/mssql_transactions.go b/internal/collector/mssql/mssql_transactions.go index 21387575..c84f2b5c 100644 --- a/internal/collector/mssql/mssql_transactions.go +++ b/internal/collector/mssql/mssql_transactions.go @@ -25,7 +25,7 @@ import ( ) type collectorTransactions struct { - transactionsPerfDataCollectors map[string]*pdh.Collector + transactionsPerfDataCollectors map[mssqlInstance]*pdh.Collector transactionsPerfDataObject []perfDataCounterValuesTransactions transactionsTempDbFreeSpaceBytes *prometheus.Desc @@ -62,11 +62,11 @@ type perfDataCounterValuesTransactions struct { func (c *Collector) buildTransactions() error { var err error - c.transactionsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.transactionsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.transactionsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesTransactions](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Transactions"), nil) + c.transactionsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesTransactions](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), nil) if err != nil { errs = append(errs, fmt.Errorf("failed to create Transactions collector for instance %s: %w", sqlInstance.name, err)) } @@ -160,7 +160,7 @@ func (c *Collector) collectTransactions(ch chan<- prometheus.Metric) error { // Win32_PerfRawData_MSSQLSERVER_Transactions 
docs: // - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object -func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.transactionsPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), err) @@ -170,91 +170,91 @@ func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sql c.transactionsTempDbFreeSpaceBytes, prometheus.GaugeValue, c.transactionsPerfDataObject[0].TransactionsFreeSpaceintempdbKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsLongestTransactionRunningSeconds, prometheus.GaugeValue, c.transactionsPerfDataObject[0].TransactionsLongestTransactionRunningTime, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsNonSnapshotVersionActiveTotal, prometheus.CounterValue, c.transactionsPerfDataObject[0].TransactionsNonSnapshotVersionTransactions, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsSnapshotActiveTotal, prometheus.CounterValue, c.transactionsPerfDataObject[0].TransactionsSnapshotTransactions, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsActive, prometheus.GaugeValue, c.transactionsPerfDataObject[0].TransactionsTransactions, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsUpdateConflictsTotal, prometheus.CounterValue, c.transactionsPerfDataObject[0].TransactionsUpdateconflictratio, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsUpdateSnapshotActiveTotal, prometheus.CounterValue, c.transactionsPerfDataObject[0].TransactionsUpdateSnapshotTransactions, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsVersionCleanupRateBytes, prometheus.GaugeValue, c.transactionsPerfDataObject[0].TransactionsVersionCleanuprateKBPers*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsVersionGenerationRateBytes, prometheus.GaugeValue, c.transactionsPerfDataObject[0].TransactionsVersionGenerationrateKBPers*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsVersionStoreSizeBytes, prometheus.GaugeValue, c.transactionsPerfDataObject[0].TransactionsVersionStoreSizeKB*1024, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsVersionStoreUnits, prometheus.CounterValue, c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcount, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsVersionStoreCreationUnits, prometheus.CounterValue, c.transactionsPerfDataObject[0].TransactionsVersionStoreunitcreation, - sqlInstance, + sqlInstance.name, ) ch <- prometheus.MustNewConstMetric( c.transactionsVersionStoreTruncationUnits, prometheus.CounterValue, c.transactionsPerfDataObject[0].TransactionsVersionStoreunittruncation, - sqlInstance, + sqlInstance.name, ) return nil diff --git a/internal/collector/mssql/mssql_wait_stats.go b/internal/collector/mssql/mssql_wait_stats.go index 90fa6550..f6855fe5 100644 --- a/internal/collector/mssql/mssql_wait_stats.go 
+++ b/internal/collector/mssql/mssql_wait_stats.go @@ -25,7 +25,7 @@ import ( ) type collectorWaitStats struct { - waitStatsPerfDataCollectors map[string]*pdh.Collector + waitStatsPerfDataCollectors map[mssqlInstance]*pdh.Collector waitStatsPerfDataObject []perfDataCounterValuesWaitStats waitStatsLockWaits *prometheus.Desc @@ -62,11 +62,11 @@ type perfDataCounterValuesWaitStats struct { func (c *Collector) buildWaitStats() error { var err error - c.waitStatsPerfDataCollectors = make(map[string]*pdh.Collector, len(c.mssqlInstances)) + c.waitStatsPerfDataCollectors = make(map[mssqlInstance]*pdh.Collector, len(c.mssqlInstances)) errs := make([]error, 0, len(c.mssqlInstances)) for _, sqlInstance := range c.mssqlInstances { - c.waitStatsPerfDataCollectors[sqlInstance.name], err = pdh.NewCollector[perfDataCounterValuesWaitStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance.name, "Wait Statistics"), pdh.InstancesAll) + c.waitStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesWaitStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), pdh.InstancesAll) if err != nil { errs = append(errs, fmt.Errorf("failed to create Wait Statistics collector for instance %s: %w", sqlInstance.name, err)) } @@ -153,7 +153,7 @@ func (c *Collector) collectWaitStats(ch chan<- prometheus.Metric) error { return c.collect(ch, subCollectorWaitStats, c.waitStatsPerfDataCollectors, c.collectWaitStatsInstance) } -func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *pdh.Collector) error { +func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance mssqlInstance, perfDataCollector *pdh.Collector) error { err := perfDataCollector.Collect(&c.waitStatsPerfDataObject) if err != nil { return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), err) @@ -164,84 +164,84 @@ func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlIns c.waitStatsLockWaits, prometheus.CounterValue, data.WaitStatsLockWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsMemoryGrantQueueWaits, prometheus.CounterValue, data.WaitStatsMemoryGrantQueueWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsThreadSafeMemoryObjectsWaits, prometheus.CounterValue, data.WaitStatsThreadSafeMemoryObjectsWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsLogWriteWaits, prometheus.CounterValue, data.WaitStatsLogWriteWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsLogBufferWaits, prometheus.CounterValue, data.WaitStatsLogBufferWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsNetworkIOWaits, prometheus.CounterValue, data.WaitStatsNetworkIOWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsPageIOLatchWaits, prometheus.CounterValue, data.WaitStatsPageIOLatchWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsPageLatchWaits, prometheus.CounterValue, data.WaitStatsPageLatchWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsNonPageLatchWaits, 
prometheus.CounterValue, data.WaitStatsNonpageLatchWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsWaitForTheWorkerWaits, prometheus.CounterValue, data.WaitStatsWaitForTheWorkerWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsWorkspaceSynchronizationWaits, prometheus.CounterValue, data.WaitStatsWorkspaceSynchronizationWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) ch <- prometheus.MustNewConstMetric( c.waitStatsTransactionOwnershipWaits, prometheus.CounterValue, data.WaitStatsTransactionOwnershipWaits, - sqlInstance, data.Name, + sqlInstance.name, data.Name, ) } diff --git a/internal/collector/mssql/types.go b/internal/collector/mssql/types.go index 0f246ca5..5dc4710b 100644 --- a/internal/collector/mssql/types.go +++ b/internal/collector/mssql/types.go @@ -8,13 +8,14 @@ import ( ) type mssqlInstance struct { - name string - majorVersion mssqlServerMajorVersion - patchVersion string - edition string + name string + majorVersion mssqlServerMajorVersion + patchVersion string + edition string + isFirstInstance bool } -func newMssqlInstance(name string) (mssqlInstance, error) { +func newMssqlInstance(key, name string) (mssqlInstance, error) { regKey := fmt.Sprintf(`Software\Microsoft\Microsoft SQL Server\%s\Setup`, name) k, err := registry.OpenKey(registry.LOCAL_MACHINE, regKey, registry.QUERY_VALUE) @@ -39,10 +40,11 @@ func newMssqlInstance(name string) (mssqlInstance, error) { _, name, _ = strings.Cut(name, ".") return mssqlInstance{ - edition: edition, - name: name, - majorVersion: newMajorVersion(patchVersion), - patchVersion: patchVersion, + edition: edition, + name: name, + majorVersion: newMajorVersion(patchVersion), + patchVersion: patchVersion, + isFirstInstance: key == "MSSQLSERVER", }, nil } From 2c4698f1198cf30df4afd0fe73738f30ac4ffdb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Sun, 6 Apr 2025 03:36:58 +0200 Subject: [PATCH 10/14] [0.30] support web.listen-addr from CLI (#3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke Signed-off-by: Jan-Otto Kröpke --- cmd/windows_exporter/main.go | 54 +++------------------------ internal/config/config.go | 71 +++++++++++++++++++++++++++--------- 2 files changed, 58 insertions(+), 67 deletions(-) diff --git a/cmd/windows_exporter/main.go b/cmd/windows_exporter/main.go index e0f86fdd..1edb3e09 100644 --- a/cmd/windows_exporter/main.go +++ b/cmd/windows_exporter/main.go @@ -74,7 +74,7 @@ func run(ctx context.Context, args []string) int { "config.file", "YAML configuration file to use. Values set in this file will be overridden by CLI flags.", ).String() - insecureSkipVerify = app.Flag( + _ = app.Flag( "config.file.insecure-skip-verify", "Skip TLS verification in loading YAML configuration.", ).Default("false").Bool() @@ -125,11 +125,9 @@ func run(ctx context.Context, args []string) int { // Initialize collectors before loading and parsing CLI arguments collectors := collector.NewWithFlags(app) - // Load values from configuration file(s). Executable flags must first be parsed, in order - // to load the specified file(s). 
- if _, err := app.Parse(os.Args[1:]); err != nil { + if err := config.Parse(app, os.Args[1:]); err != nil { //nolint:sloglint // we do not have an logger yet - slog.LogAttrs(ctx, slog.LevelError, "Failed to parse CLI args", + slog.LogAttrs(ctx, slog.LevelError, "Failed to load configuration", slog.Any("err", err), ) @@ -137,58 +135,16 @@ func run(ctx context.Context, args []string) int { } debug.SetMemoryLimit(*memoryLimit) - logger, err := log.New(logConfig) if err != nil { logger.LogAttrs(ctx, slog.LevelError, "failed to create logger", slog.Any("err", err), ) - return 1 } - if *configFile != "" { - resolver, err := config.NewResolver(ctx, *configFile, logger, *insecureSkipVerify) - if err != nil { - logger.Error("could not load config file", - slog.Any("err", err), - ) - - return 1 - } - - if err = resolver.Bind(app, os.Args[1:]); err != nil { - logger.ErrorContext(ctx, "failed to bind configuration", - slog.Any("err", err), - ) - - return 1 - } - - // Parse flags once more to include those discovered in configuration file(s). - if _, err = app.Parse(os.Args[1:]); err != nil { - logger.ErrorContext(ctx, "failed to parse CLI args from YAML file", - slog.Any("err", err), - ) - - return 1 - } - - // NOTE: This is temporary fix for issue #1092, calling kingpin.Parse - // twice makes slices flags duplicate its value, this clean up - // the first parse before the second call. - slices.Sort(*webConfig.WebListenAddresses) - *webConfig.WebListenAddresses = slices.Clip(slices.Compact(*webConfig.WebListenAddresses)) - - logger, err = log.New(logConfig) - if err != nil { - //nolint:sloglint // we do not have an logger yet - slog.Error("failed to create logger", - slog.Any("err", err), - ) - - return 1 - } + if configFile != nil && *configFile != "" { + logger.InfoContext(ctx, "using configuration file: "+*configFile) } logger.LogAttrs(ctx, slog.LevelDebug, "logging has Started") diff --git a/internal/config/config.go b/internal/config/config.go index aec44748..ea714608 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -16,7 +16,6 @@ package config import ( - "context" "crypto/tls" "fmt" "io" @@ -38,8 +37,52 @@ type Resolver struct { flags map[string]string } -// NewResolver returns a Resolver structure. -func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecureSkipVerify bool) (*Resolver, error) { +// Parse parses the command line arguments and configuration files. +func Parse(app *kingpin.Application, args []string) error { + configFile := ParseConfigFile(args) + if configFile != "" { + resolver, err := NewConfigFileResolver(configFile) + if err != nil { + return fmt.Errorf("failed to load configuration file: %w", err) + } + + if err = resolver.Bind(app, args); err != nil { + return fmt.Errorf("failed to bind configuration: %w", err) + } + } + + if _, err := app.Parse(args); err != nil { + return fmt.Errorf("failed to parse flags: %w", err) + } + + return nil +} + +// ParseConfigFile manually parses the configuration file from the command line arguments. +func ParseConfigFile(args []string) string { + for i, cliFlag := range args { + if strings.HasPrefix(cliFlag, "--config.file=") { + return strings.TrimPrefix(cliFlag, "--config.file=") + } + + if strings.HasPrefix(cliFlag, "-config.file=") { + return strings.TrimPrefix(cliFlag, "-config.file=") + } + + if strings.HasSuffix(cliFlag, "-config.file") { + if len(os.Args) <= i+1 { + return "" + } + + return os.Args[i+1] + } + } + + return "" +} + +// NewConfigFileResolver returns a Resolver structure. 
+func NewConfigFileResolver(file string) (*Resolver, error) { flags := map[string]string{} var ( @@ -48,14 +91,14 @@ func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecure ) if strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://") { - logger.WarnContext(ctx, "Loading configuration file from URL is deprecated and will be removed in 0.31.0. Use a local file instead.") + slog.Warn("Loading configuration file from URL is deprecated and will be removed in 0.31.0. Use a local file instead.") - fileBytes, err = readFromURL(ctx, file, logger, insecureSkipVerify) + fileBytes, err = readFromURL(file) if err != nil { return nil, err } } else { - fileBytes, err = readFromFile(ctx, file, logger) + fileBytes, err = readFromFile(file) if err != nil { return nil, err } @@ -79,9 +122,7 @@ func NewResolver(ctx context.Context, file string, logger *slog.Logger, insecure return &Resolver{flags: flags}, nil } -func readFromFile(ctx context.Context, file string, logger *slog.Logger) ([]byte, error) { - logger.InfoContext(ctx, "loading configuration file: "+file) - +func readFromFile(file string) ([]byte, error) { if _, err := os.Stat(file); err != nil { return nil, fmt.Errorf("failed to read configuration file: %w", err) } @@ -94,20 +135,14 @@ func readFromFile(ctx context.Context, file string, logger *slog.Logger) ([]byte return fileBytes, nil } -func readFromURL(ctx context.Context, file string, logger *slog.Logger, insecureSkipVerify bool) ([]byte, error) { - logger.InfoContext(ctx, "loading configuration file from URL: "+file) - +func readFromURL(file string) ([]byte, error) { tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify}, //nolint:gosec - } - - if insecureSkipVerify { - logger.WarnContext(ctx, "Loading configuration file with TLS verification disabled") + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec } client := &http.Client{Transport: tr} - req, err := http.NewRequestWithContext(ctx, http.MethodGet, file, nil) + req, err := http.NewRequest(http.MethodGet, file, nil) if err != nil { return nil, fmt.Errorf("failed to create HTTP request: %w", err) } From 13d5e1cd12e5e47228ca268256ffcb0ef314919c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Sun, 6 Apr 2025 11:55:38 +0200 Subject: [PATCH 11/14] chore: CI fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke --- .github/workflows/lint.yml | 6 ++---- internal/collector/net/net.go | 13 +++++++++---- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 66ea5420..fad698f2 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -8,11 +8,9 @@ on: - master - next - main + - "0.*" + - "1.*" pull_request: - branches: - - master - - next - - main env: VERSION_PROMU: '0.14.0' diff --git a/internal/collector/net/net.go b/internal/collector/net/net.go index e3af2a39..efad8e63 100644 --- a/internal/collector/net/net.go +++ b/internal/collector/net/net.go @@ -33,7 +33,12 @@ import ( "golang.org/x/sys/windows" ) -const Name = "net" +const ( + Name = "net" + + subCollectorMetrics = "metrics" + subCollectorNicInfo = "nic_addresses" +) type Config struct { NicExclude *regexp.Regexp `yaml:"nic_exclude"` @@ -46,8 +51,8 @@ var ConfigDefaults = Config{ NicExclude: types.RegExpEmpty, NicInclude: types.RegExpAny, CollectorsEnabled: []string{ - "metrics", - "nic_addresses", + 
subCollectorMetrics, + subCollectorNicInfo, }, } @@ -284,7 +289,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error { } if slices.Contains(c.config.CollectorsEnabled, subCollectorNicInfo) { - if err := c.collectNICInfo(ch); err != nil { + if err := c.collectNICAddresses(ch); err != nil { errs = append(errs, fmt.Errorf("failed collecting net addresses: %w", err)) } } From aa7157e27c4197f1a6fafa1ba68b91ef6f7024a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Sun, 6 Apr 2025 11:57:14 +0200 Subject: [PATCH 12/14] system: Metric `windows_system_boot_time_timestamp` returns a UNIX timestamp again. (#1967) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke Signed-off-by: Jan-Otto Kröpke (cherry picked from commit ba605cffcce3503645bb57542bb46662d23244d1) --- docs/collector.system.md | 4 +-- internal/collector/system/system.go | 37 +++++++++++++++++++++------ internal/collector/system/types.go | 1 - internal/headers/kernel32/kernel32.go | 15 ++++++++--- tools/e2e-output.txt | 4 ++- 5 files changed, 45 insertions(+), 16 deletions(-) diff --git a/docs/collector.system.md b/docs/collector.system.md index 937c6734..438539d1 100644 --- a/docs/collector.system.md +++ b/docs/collector.system.md @@ -16,7 +16,7 @@ None | Name | Description | Type | Labels | |----------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|--------| -| `windows_system_boot_time_timestamp_seconds` | Unix timestamp of last system boot | gauge | None | +| `windows_system_boot_time_timestamp` | Unix timestamp of last system boot | gauge | None | | `windows_system_context_switches_total` | Total number of [context switches](https://en.wikipedia.org/wiki/Context_switch) | counter | None | | `windows_system_exception_dispatches_total` | Total exceptions dispatched by the system | counter | None | | `windows_system_processes` | Number of process contexts currently loaded or running on the operating system | gauge | None | @@ -41,7 +41,7 @@ windows_system_processes{instance="localhost"} ## Useful queries Find hosts that have rebooted in the last 24 hours ``` -time() - windows_system_boot_time_timestamp_seconds < 86400 +time() - windows_system_boot_time_timestamp < 86400 ``` ## Alerting examples diff --git a/internal/collector/system/system.go b/internal/collector/system/system.go index c3818929..b574fcc0 100644 --- a/internal/collector/system/system.go +++ b/internal/collector/system/system.go @@ -18,8 +18,10 @@ package system import ( "fmt" "log/slog" + "time" "github.com/alecthomas/kingpin/v2" + "github.com/prometheus-community/windows_exporter/internal/headers/kernel32" "github.com/prometheus-community/windows_exporter/internal/mi" "github.com/prometheus-community/windows_exporter/internal/pdh" "github.com/prometheus-community/windows_exporter/internal/types" @@ -37,6 +39,8 @@ var ConfigDefaults = Config{} type Collector struct { config Config + bootTimeTimestamp float64 + perfDataCollector *pdh.Collector perfDataObject []perfDataCounterValues @@ -46,8 +50,10 @@ type Collector struct { processes *prometheus.Desc processesLimit *prometheus.Desc systemCallsTotal *prometheus.Desc - bootTime *prometheus.Desc - threads *prometheus.Desc + // Deprecated: Use windows_system_boot_time_timestamp instead + bootTimeSeconds 
*prometheus.Desc + bootTime *prometheus.Desc + threads *prometheus.Desc } func New(config *Config) *Collector { @@ -78,11 +84,17 @@ func (c *Collector) Close() error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { c.bootTime = prometheus.NewDesc( - prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"), + prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp"), "Unix timestamp of system boot time", nil, nil, ) + c.bootTimeSeconds = prometheus.NewDesc( + prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"), + "Deprecated: Use windows_system_boot_time_timestamp instead", + nil, + nil, + ) c.contextSwitchesTotal = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "context_switches_total"), "Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)", @@ -127,6 +139,8 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { nil, ) + c.bootTimeTimestamp = float64(time.Now().Unix() - int64(kernel32.GetTickCount64()/1000)) + var err error c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil) @@ -170,17 +184,24 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error { prometheus.CounterValue, c.perfDataObject[0].SystemCallsPerSec, ) - ch <- prometheus.MustNewConstMetric( - c.bootTime, - prometheus.GaugeValue, - c.perfDataObject[0].SystemUpTime, - ) ch <- prometheus.MustNewConstMetric( c.threads, prometheus.GaugeValue, c.perfDataObject[0].Threads, ) + ch <- prometheus.MustNewConstMetric( + c.bootTimeSeconds, + prometheus.GaugeValue, + c.bootTimeTimestamp, + ) + + ch <- prometheus.MustNewConstMetric( + c.bootTime, + prometheus.GaugeValue, + c.bootTimeTimestamp, + ) + // Windows has no defined limit, and is based off available resources. This currently isn't calculated by WMI and is set to default value. // https://techcommunity.microsoft.com/t5/windows-blog-archive/pushing-the-limits-of-windows-processes-and-threads/ba-p/723824 // https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-operatingsystem diff --git a/internal/collector/system/types.go b/internal/collector/system/types.go index 03c156fa..b6336a97 100644 --- a/internal/collector/system/types.go +++ b/internal/collector/system/types.go @@ -20,7 +20,6 @@ type perfDataCounterValues struct { ExceptionDispatchesPerSec float64 `perfdata:"Exception Dispatches/sec"` ProcessorQueueLength float64 `perfdata:"Processor Queue Length"` SystemCallsPerSec float64 `perfdata:"System Calls/sec"` - SystemUpTime float64 `perfdata:"System Up Time"` Processes float64 `perfdata:"Processes"` Threads float64 `perfdata:"Threads"` } diff --git a/internal/headers/kernel32/kernel32.go b/internal/headers/kernel32/kernel32.go index 092f9878..07ba6f73 100644 --- a/internal/headers/kernel32/kernel32.go +++ b/internal/headers/kernel32/kernel32.go @@ -23,10 +23,11 @@ import ( //nolint:gochecknoglobals var ( - kernel32 = windows.NewLazySystemDLL("kernel32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - procGetDynamicTimeZoneInformationSys = kernel32.NewProc("GetDynamicTimeZoneInformation") - kernelLocalFileTimeToFileTime = kernel32.NewProc("LocalFileTimeToFileTime") + procGetDynamicTimeZoneInformationSys = modkernel32.NewProc("GetDynamicTimeZoneInformation") + procKernelLocalFileTimeToFileTime = modkernel32.NewProc("LocalFileTimeToFileTime") + procGetTickCount = modkernel32.NewProc("GetTickCount64") ) // SYSTEMTIME contains a date and time. 
@@ -70,9 +71,15 @@ func GetDynamicTimeZoneInformation() (DynamicTimezoneInformation, error) { } func LocalFileTimeToFileTime(localFileTime, utcFileTime *windows.Filetime) uint32 { - ret, _, _ := kernelLocalFileTimeToFileTime.Call( + ret, _, _ := procKernelLocalFileTimeToFileTime.Call( uintptr(unsafe.Pointer(localFileTime)), uintptr(unsafe.Pointer(utcFileTime))) return uint32(ret) } + +func GetTickCount64() uint64 { + ret, _, _ := procGetTickCount.Call() + + return uint64(ret) +} diff --git a/tools/e2e-output.txt b/tools/e2e-output.txt index 7b4e6f65..fcd2f3bc 100644 --- a/tools/e2e-output.txt +++ b/tools/e2e-output.txt @@ -389,7 +389,9 @@ windows_service_state{name="Themes",state="running"} 1 windows_service_state{name="Themes",state="start pending"} 0 windows_service_state{name="Themes",state="stop pending"} 0 windows_service_state{name="Themes",state="stopped"} 0 -# HELP windows_system_boot_time_timestamp_seconds Unix timestamp of system boot time +# HELP windows_system_boot_time_timestamp Unix timestamp of system boot time +# TYPE windows_system_boot_time_timestamp gauge +# HELP windows_system_boot_time_timestamp_seconds Deprecated: Use windows_system_boot_time_timestamp instead # TYPE windows_system_boot_time_timestamp_seconds gauge # HELP windows_system_context_switches_total Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec) # TYPE windows_system_context_switches_total counter From 9ed37697655a755bd0dad1fca3272c12e3465d98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Sun, 6 Apr 2025 12:08:43 +0200 Subject: [PATCH 13/14] chore: CI fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke --- cmd/windows_exporter/main_test.go | 2 +- internal/collector/textfile/textfile_test_test.go | 5 +++-- internal/config/config.go | 4 +++- internal/utils/testutils/testutils.go | 3 ++- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/cmd/windows_exporter/main_test.go b/cmd/windows_exporter/main_test.go index af81f454..e3216d54 100644 --- a/cmd/windows_exporter/main_test.go +++ b/cmd/windows_exporter/main_test.go @@ -72,7 +72,7 @@ func TestRun(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(t.Context()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() if tc.config != "" { diff --git a/internal/collector/textfile/textfile_test_test.go b/internal/collector/textfile/textfile_test_test.go index e71f8fac..60c5e937 100644 --- a/internal/collector/textfile/textfile_test_test.go +++ b/internal/collector/textfile/textfile_test_test.go @@ -16,6 +16,7 @@ package textfile_test import ( + "context" "fmt" "io" "log/slog" @@ -44,7 +45,7 @@ func TestMultipleDirectories(t *testing.T) { }) collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector}) - require.NoError(t, collectors.Build(t.Context(), logger)) + require.NoError(t, collectors.Build(context.Background(), logger)) metrics := make(chan prometheus.Metric) got := "" @@ -81,7 +82,7 @@ func TestDuplicateFileName(t *testing.T) { }) collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector}) - require.NoError(t, collectors.Build(t.Context(), logger)) + require.NoError(t, collectors.Build(context.Background(), logger)) metrics := make(chan prometheus.Metric) got := "" diff --git a/internal/config/config.go b/internal/config/config.go index ea714608..4dcda129 100644 --- 
a/internal/config/config.go +++ b/internal/config/config.go @@ -16,6 +16,7 @@ package config import ( + "context" "crypto/tls" "fmt" "io" @@ -91,6 +92,7 @@ func NewConfigFileResolver(file string) (*Resolver, error) { ) if strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://") { + //nolint:sloglint // we do not have an logger yet slog.Warn("Loading configuration file from URL is deprecated and will be removed in 0.31.0. Use a local file instead.") fileBytes, err = readFromURL(file) @@ -142,7 +144,7 @@ func readFromURL(file string) ([]byte, error) { client := &http.Client{Transport: tr} - req, err := http.NewRequest(http.MethodGet, file, nil) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, file, nil) if err != nil { return nil, fmt.Errorf("failed to create HTTP request: %w", err) } diff --git a/internal/utils/testutils/testutils.go b/internal/utils/testutils/testutils.go index 75546132..bf7e3894 100644 --- a/internal/utils/testutils/testutils.go +++ b/internal/utils/testutils/testutils.go @@ -16,6 +16,7 @@ package testutils import ( + "context" "errors" "io" "log/slog" @@ -47,7 +48,7 @@ func FuncBenchmarkCollector[C collector.Collector](b *testing.B, name string, co } collectors := collector.New(map[string]collector.Collector{name: c}) - require.NoError(b, collectors.Build(b.Context(), logger)) + require.NoError(b, collectors.Build(context.Background(), logger)) metrics := make(chan prometheus.Metric) From bdd7725f17b285a30ef569ef7fb5517da56445e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Sun, 6 Apr 2025 12:21:00 +0200 Subject: [PATCH 14/14] chore: CI fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jan-Otto Kröpke --- cmd/windows_exporter/main.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/windows_exporter/main.go b/cmd/windows_exporter/main.go index 1edb3e09..9b61732e 100644 --- a/cmd/windows_exporter/main.go +++ b/cmd/windows_exporter/main.go @@ -125,7 +125,8 @@ func run(ctx context.Context, args []string) int { // Initialize collectors before loading and parsing CLI arguments collectors := collector.NewWithFlags(app) - if err := config.Parse(app, os.Args[1:]); err != nil { + //nolint:contextcheck + if err := config.Parse(app, args); err != nil { //nolint:sloglint // we do not have an logger yet slog.LogAttrs(ctx, slog.LevelError, "Failed to load configuration", slog.Any("err", err), @@ -135,11 +136,13 @@ func run(ctx context.Context, args []string) int { } debug.SetMemoryLimit(*memoryLimit) + logger, err := log.New(logConfig) if err != nil { logger.LogAttrs(ctx, slog.LevelError, "failed to create logger", slog.Any("err", err), ) + return 1 }
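
Reviewer note: the new `config.ParseConfigFile` pre-scan (introduced in the `web.listen-addr` patch above and exercised by `config.Parse(app, args)` in the final patch) can be verified in isolation. Below is a minimal, hypothetical test sketch — file name and test values are illustrative, not part of the patch series — covering only the `=`-separated flag spellings, which are resolved directly from the argument slice as shown in the diff:

```go
// internal/config/config_test.go (hypothetical file name, not part of this series)
package config_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/internal/config"
)

// TestParseConfigFile checks that the pre-scan extracts the configuration
// file path from the raw argument slice before kingpin parsing runs.
func TestParseConfigFile(t *testing.T) {
	for _, tc := range []struct {
		args []string
		want string
	}{
		{[]string{"--config.file=config.yaml"}, "config.yaml"},
		{[]string{"-config.file=C:\\exporter\\config.yaml"}, "C:\\exporter\\config.yaml"},
		{[]string{"--web.listen-address=:9182"}, ""}, // no config file flag present
	} {
		if got := config.ParseConfigFile(tc.args); got != tc.want {
			t.Errorf("ParseConfigFile(%v) = %q, want %q", tc.args, got, tc.want)
		}
	}
}
```

Note that the space-separated form (`--config.file <path>`) is resolved from `os.Args` rather than the passed slice in the patched code, so it is intentionally left out of this sketch.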