pagefile: BREAKING: move paging metrics from os to dedicated collector (click PR for more information) (#1735)

Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Jan-Otto Kröpke
2024-11-14 22:39:59 +01:00
committed by GitHub
parent df8513ab8e
commit 7a9a4e5831
13 changed files with 238 additions and 25 deletions
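For readers landing on this commit directly: the breaking part is a metric rename. The os collector keeps its aggregate paging gauges only with deprecation notices, while the new pagefile collector exposes the equivalent data broken out per page file under a file label. Roughly (the label value below is illustrative):

windows_os_paging_free_bytes  ->  windows_pagefile_free_bytes{file="C:\pagefile.sys"}
windows_os_paging_limit_bytes ->  windows_pagefile_limit_bytes{file="C:\pagefile.sys"}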

View File

@@ -34,9 +34,14 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
hostname *prometheus.Desc
osInformation *prometheus.Desc
pagingFreeBytes *prometheus.Desc
hostname *prometheus.Desc
osInformation *prometheus.Desc
// pagingFreeBytes
// Deprecated: Use windows_pagefile_free_bytes instead.
pagingFreeBytes *prometheus.Desc
// pagingLimitBytes
// Deprecated: Use windows_pagefile_limit_bytes instead.
pagingLimitBytes *prometheus.Desc
// users
@@ -151,13 +156,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
)
c.pagingLimitBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "paging_limit_bytes"),
"OperatingSystem.SizeStoredInPagingFiles",
"Deprecated: Use windows_pagefile_limit_bytes instead.",
nil,
nil,
)
c.pagingFreeBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "paging_free_bytes"),
"OperatingSystem.FreeSpaceInPagingFiles",
"Deprecated: Use windows_pagefile_free_bytes instead.",
nil,
nil,
)

View File

@@ -0,0 +1,5 @@
package pagefile
const (
usage = "% Usage"
)
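The single constant above names the counter queried from the Windows "Paging File" performance object in the collector below; each instance of that object corresponds to one configured page file and is named by its NT path, hence the \??\ prefix that collectPaging strips before calling os.Stat.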

View File

@@ -0,0 +1,136 @@
//go:build windows
package pagefile
import (
"fmt"
"log/slog"
"os"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/psapi"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
const Name = "pagefile"
type Config struct{}
var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for Paging File performance counter metrics.
type Collector struct {
config Config
perfDataCollector perfdata.Collector
pagingFreeBytes *prometheus.Desc
pagingLimitBytes *prometheus.Desc
}
func New(config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
return c
}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
counters := []string{
usage,
}
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Paging File", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create Paging File collector: %w", err)
}
c.pagingLimitBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "limit_bytes"),
"Number of bytes that can be stored in the operating system paging files. 0 (zero) indicates that there are no paging files",
[]string{"file"},
nil,
)
c.pagingFreeBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "free_bytes"),
"Number of bytes that can be mapped into the operating system paging files without causing any other pages to be swapped out",
[]string{"file"},
nil,
)
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
return c.collectPaging(ch)
}
func (c *Collector) collectPaging(ch chan<- prometheus.Metric) error {
data, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect Paging File metrics: %w", err)
}
gpi, err := psapi.GetPerformanceInfo()
if err != nil {
return err
}
for fileName, pageFile := range data {
fileString := strings.ReplaceAll(fileName, `\??\`, "")
file, err := os.Stat(fileString)
var fileSize float64
// For unknown reasons, Windows doesn't always create a page file. Continue collection rather than aborting.
if err == nil {
fileSize = float64(file.Size())
}
ch <- prometheus.MustNewConstMetric(
c.pagingFreeBytes,
prometheus.GaugeValue,
fileSize-(pageFile[usage].FirstValue*float64(gpi.PageSize)),
fileString,
)
ch <- prometheus.MustNewConstMetric(
c.pagingLimitBytes,
prometheus.GaugeValue,
fileSize,
fileString,
)
}
return nil
}
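A note on the arithmetic in collectPaging: the raw "% Usage" value is treated as a count of in-use pages, scaled by the system page size from psapi.GetPerformanceInfo() and subtracted from the page file's on-disk size. A minimal standalone sketch of that calculation, with illustrative names that are not part of the collector:

package main

import "fmt"

// freePageFileBytes mirrors collectPaging's calculation for one page file:
// free = size of the backing file on disk - (used pages * system page size).
func freePageFileBytes(fileSize, usedPages float64, pageSize uint64) float64 {
	return fileSize - usedPages*float64(pageSize)
}

func main() {
	// Illustrative numbers: a 4 GiB page file with 1024 used 4 KiB pages.
	fmt.Println(freePageFileBytes(4<<30, 1024, 4096))
}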

View File

@@ -0,0 +1,16 @@
package pagefile_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/pagefile"
"github.com/prometheus-community/windows_exporter/internal/testutils"
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, pagefile.Name, pagefile.NewWithFlags)
}
func TestCollector(t *testing.T) {
testutils.TestCollector(t, pagefile.New, nil)
}
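Assuming the standard Go toolchain, the new test and benchmark can be run from the repository root on a Windows host with go test -bench . ./internal/collector/pagefile (pagefile.go carries the windows build constraint, so the package only compiles there).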

View File

@@ -5,6 +5,7 @@ package v2
import (
"errors"
"fmt"
"slices"
"strings"
"unsafe"
@@ -14,9 +15,10 @@ import (
)
type Collector struct {
object string
counters map[string]Counter
handle pdhQueryHandle
object string
counters map[string]Counter
handle pdhQueryHandle
totalCounterRequested bool
}
type Counter struct {
@@ -39,9 +41,10 @@ func NewCollector(object string, instances []string, counters []string) (*Collector, error) {
}
collector := &Collector{
object: object,
counters: make(map[string]Counter, len(counters)),
handle: handle,
object: object,
counters: make(map[string]Counter, len(counters)),
handle: handle,
totalCounterRequested: slices.Contains(instances, "_Total"),
}
for _, counterName := range counters {
@@ -166,12 +169,10 @@ func (c *Collector) Collect() (map[string]map[string]perftypes.CounterValues, er
metricType = prometheus.GaugeValue
}
_, isTotalCounterRequests := c.counters["_Total"]
for _, item := range items {
if item.RawValue.CStatus == PdhCstatusValidData || item.RawValue.CStatus == PdhCstatusNewData {
instanceName := windows.UTF16PtrToString(item.SzName)
if strings.HasSuffix(instanceName, "_Total") && !isTotalCounterRequests {
if strings.HasSuffix(instanceName, "_Total") && !c.totalCounterRequested {
continue
}
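The last hunk moves the "_Total" decision from scrape time to construction time: the removed lookup checked whether "_Total" appeared in c.counters, which is keyed by counter name, while totals are requested through the instances argument. The new totalCounterRequested field is set once in NewCollector from that slice, so aggregate "_Total" rows are only skipped when the caller did not ask for them. A hypothetical caller that does want the aggregate row would pass the instance explicitly, e.g. (object and counter names here are illustrative, not taken from this commit):

collector, err := perfdata.NewCollector(perfdata.V2, "Processor Information", []string{"_Total"}, []string{"% Processor Time"})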