remote_fx: refactor collector (#1738)

Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
This commit is contained in:
Jan-Otto Kröpke
2024-11-15 19:34:00 +01:00
committed by GitHub
parent a4ec0a96f1
commit 78bd720e88
32 changed files with 278 additions and 204 deletions

View File

@@ -10,6 +10,7 @@ import (
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata" "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types" "github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@@ -115,6 +116,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
return nil return nil
} }
@@ -268,7 +273,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "DirectoryServices", perfdata.AllInstances, counters) c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "DirectoryServices", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DirectoryServices collector: %w", err) return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
} }

View File

@@ -72,6 +72,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
return nil return nil
} }
@@ -95,7 +99,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Certification Authority", perfdata.AllInstances, counters) c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Certification Authority", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Certification Authority collector: %w", err) return fmt.Errorf("failed to create Certification Authority collector: %w", err)
} }

View File

@@ -104,6 +104,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
return nil return nil
} }
@@ -157,7 +161,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "AD FS", perfdata.AllInstances, counters) c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "AD FS", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create AD FS collector: %w", err) return fmt.Errorf("failed to create AD FS collector: %w", err)
} }

View File

@@ -89,6 +89,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
return nil return nil
} }
@@ -128,7 +132,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Cache", perfdata.AllInstances, counters) c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Cache", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Cache collector: %w", err) return fmt.Errorf("failed to create Cache collector: %w", err)
} }

View File

@@ -76,6 +76,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
return nil return nil
} }

View File

@@ -164,6 +164,20 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
if slices.Contains(c.config.CollectorsEnabled, "connection") {
c.perfDataCollectorConnection.Close()
}
if slices.Contains(c.config.CollectorsEnabled, "folder") {
c.perfDataCollectorFolder.Close()
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
c.perfDataCollectorVolume.Close()
}
}
return nil return nil
} }
@@ -189,7 +203,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
sizeOfFilesReceivedTotal, sizeOfFilesReceivedTotal,
} }
c.perfDataCollectorConnection, err = perfdata.NewCollector(perfdata.V1, "DFS Replication Connections", perfdata.AllInstances, counters) c.perfDataCollectorConnection, err = perfdata.NewCollector(perfdata.V2, "DFS Replication Connections", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err) return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
} }
@@ -226,7 +240,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
updatesDroppedTotal, updatesDroppedTotal,
} }
c.perfDataCollectorFolder, err = perfdata.NewCollector(perfdata.V1, "DFS Replicated Folders", perfdata.AllInstances, counters) c.perfDataCollectorFolder, err = perfdata.NewCollector(perfdata.V2, "DFS Replicated Folders", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err) return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
} }
@@ -241,7 +255,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
usnJournalUnreadPercentage, usnJournalUnreadPercentage,
} }
c.perfDataCollectorVolume, err = perfdata.NewCollector(perfdata.V1, "DFS Replication Service Volumes", perfdata.AllInstances, counters) c.perfDataCollectorVolume, err = perfdata.NewCollector(perfdata.V2, "DFS Replication Service Volumes", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err) return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
} }

View File

@@ -85,12 +85,18 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() { if toggle.IsPDHEnabled() {
counters := []string{ var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "DHCP Server", perfdata.AllInstances, []string{
acksTotal, acksTotal,
activeQueueLength, activeQueueLength,
conflictCheckQueueLength, conflictCheckQueueLength,
@@ -116,11 +122,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
packetsReceivedTotal, packetsReceivedTotal,
releasesTotal, releasesTotal,
requestsTotal, requestsTotal,
} })
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "DHCP Server", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DHCP Server collector: %w", err) return fmt.Errorf("failed to create DHCP Server collector: %w", err)
} }

View File

@@ -76,11 +76,15 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
c.perfDataCollector.Close()
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
counters := []string{ var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "DNS", perfdata.AllInstances, []string{
axfrRequestReceived, axfrRequestReceived,
axfrRequestSent, axfrRequestSent,
axfrResponseReceived, axfrResponseReceived,
@@ -121,11 +125,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
winsReverseResponseSent, winsReverseResponseSent,
zoneTransferFailure, zoneTransferFailure,
zoneTransferSOARequestSent, zoneTransferSOARequestSent,
} })
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "DNS", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DNS collector: %w", err) return fmt.Errorf("failed to create DNS collector: %w", err)
} }

View File

@@ -33,7 +33,7 @@ func (c *Collector) buildActiveSync() error {
var err error var err error
c.perfDataCollectorActiveSync, err = perfdata.NewCollector(perfdata.V1, "MSExchange ActiveSync", perfdata.AllInstances, counters) c.perfDataCollectorActiveSync, err = perfdata.NewCollector(perfdata.V2, "MSExchange ActiveSync", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err) return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err)
} }

View File

@@ -42,7 +42,7 @@ func (c *Collector) buildADAccessProcesses() error {
var err error var err error
c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector(perfdata.V1, "MSExchange ADAccess Processes", perfdata.AllInstances, counters) c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector(perfdata.V2, "MSExchange ADAccess Processes", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err) return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err)
} }

View File

@@ -23,7 +23,7 @@ func (c *Collector) buildAutoDiscover() error {
var err error var err error
c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector(perfdata.V1, "MSExchange Autodiscover", perfdata.AllInstances, counters) c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector(perfdata.V2, "MSExchange Autodiscover", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err) return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
} }

View File

@@ -21,7 +21,7 @@ func (c *Collector) buildAvailabilityService() error {
var err error var err error
c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector(perfdata.V1, "MSExchange Availability Service", perfdata.AllInstances, counters) c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector(perfdata.V2, "MSExchange Availability Service", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err) return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err)
} }

View File

@@ -44,7 +44,7 @@ func (c *Collector) buildHTTPProxy() error {
var err error var err error
c.perfDataCollectorHttpProxy, err = perfdata.NewCollector(perfdata.V1, "MSExchange HttpProxy", perfdata.AllInstances, counters) c.perfDataCollectorHttpProxy, err = perfdata.NewCollector(perfdata.V2, "MSExchange HttpProxy", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err) return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err)
} }

View File

@@ -27,7 +27,7 @@ func (c *Collector) buildMapiHttpEmsmdb() error {
var err error var err error
c.perfDataCollectorMapiHttpEmsmdb, err = perfdata.NewCollector(perfdata.V1, "MSExchange MapiHttp Emsmdb", perfdata.AllInstances, counters) c.perfDataCollectorMapiHttpEmsmdb, err = perfdata.NewCollector(perfdata.V2, "MSExchange MapiHttp Emsmdb", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err) return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err)
} }

View File

@@ -30,7 +30,7 @@ func (c *Collector) buildOWA() error {
var err error var err error
c.perfDataCollectorOWA, err = perfdata.NewCollector(perfdata.V1, "MSExchange OWA", perfdata.AllInstances, counters) c.perfDataCollectorOWA, err = perfdata.NewCollector(perfdata.V2, "MSExchange OWA", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange OWA collector: %w", err) return fmt.Errorf("failed to create MSExchange OWA collector: %w", err)
} }

View File

@@ -42,7 +42,7 @@ func (c *Collector) buildRPC() error {
var err error var err error
c.perfDataCollectorRpcClientAccess, err = perfdata.NewCollector(perfdata.V1, "MSExchange RpcClientAccess", perfdata.AllInstances, counters) c.perfDataCollectorRpcClientAccess, err = perfdata.NewCollector(perfdata.V2, "MSExchange RpcClientAccess", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err) return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err)
} }

View File

@@ -51,7 +51,7 @@ func (c *Collector) buildTransportQueues() error {
var err error var err error
c.perfDataCollectorTransportQueues, err = perfdata.NewCollector(perfdata.V1, "MSExchangeTransport Queues", perfdata.AllInstances, counters) c.perfDataCollectorTransportQueues, err = perfdata.NewCollector(perfdata.V2, "MSExchangeTransport Queues", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err) return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err)
} }

View File

@@ -42,7 +42,7 @@ func (c *Collector) buildWorkloadManagementWorkloads() error {
var err error var err error
c.perfDataCollectorWorkloadManagementWorkloads, err = perfdata.NewCollector(perfdata.V1, "MSExchange WorkloadManagement Workloads", perfdata.AllInstances, counters) c.perfDataCollectorWorkloadManagementWorkloads, err = perfdata.NewCollector(perfdata.V2, "MSExchange WorkloadManagement Workloads", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err) return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err)
} }

View File

@@ -163,7 +163,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "LogicalDisk", perfdata.AllInstances, counters) c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "LogicalDisk", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err) return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
} }

View File

@@ -146,7 +146,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Memory", perfdata.AllInstances, counters) c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Memory", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err) return fmt.Errorf("failed to create Memory collector: %w", err)
} }

View File

@@ -146,12 +146,18 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
return nil return nil
} }
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() { if toggle.IsPDHEnabled() {
counters := []string{ var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Network Interface", perfdata.AllInstances, []string{
BytesReceivedPerSec, BytesReceivedPerSec,
BytesSentPerSec, BytesSentPerSec,
BytesTotalPerSec, BytesTotalPerSec,
@@ -165,11 +171,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
PacketsReceivedUnknown, PacketsReceivedUnknown,
PacketsSentPerSec, PacketsSentPerSec,
CurrentBandwidth, CurrentBandwidth,
} })
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Network Interface", perfdata.AllInstances, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err) return fmt.Errorf("failed to create Network Interface collector: %w", err)
} }

View File

@@ -57,6 +57,8 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
c.perfDataCollector.Close()
return nil return nil
} }

View File

@@ -138,6 +138,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
return nil return nil
} }
@@ -194,7 +198,7 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
if errors.Is(err, v2.NewPdhError(v2.PdhCstatusNoObject)) { if errors.Is(err, v2.NewPdhError(v2.PdhCstatusNoObject)) {
counters[0] = idProcess counters[0] = idProcess
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Process", perfdata.AllInstances, counters) c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Process", perfdata.AllInstances, counters)
} }
if err != nil { if err != nil {

View File

@@ -0,0 +1,27 @@
package remote_fx
const (
BaseTCPRTT = "Base TCP RTT"
BaseUDPRTT = "Base UDP RTT"
CurrentTCPBandwidth = "Current TCP Bandwidth"
CurrentTCPRTT = "Current TCP RTT"
CurrentUDPBandwidth = "Current UDP Bandwidth"
CurrentUDPRTT = "Current UDP RTT"
TotalReceivedBytes = "Total Received Bytes"
TotalSentBytes = "Total Sent Bytes"
UDPPacketsReceivedPersec = "UDP Packets Received/sec"
UDPPacketsSentPersec = "UDP Packets Sent/sec"
FECRate = "Forward Error Correction (FEC) percentage"
LossRate = "Loss percentage"
RetransmissionRate = "Percentage of packets that have been retransmitted"
AverageEncodingTime = "Average Encoding Time"
FrameQuality = "Frame Quality"
FramesSkippedPerSecondInsufficientClientResources = "Frames Skipped/Second - Insufficient Server Resources"
FramesSkippedPerSecondInsufficientNetworkResources = "Frames Skipped/Second - Insufficient Network Resources"
FramesSkippedPerSecondInsufficientServerResources = "Frames Skipped/Second - Insufficient Client Resources"
GraphicsCompressionratio = "Graphics Compression ratio"
InputFramesPerSecond = "Input Frames/Second"
OutputFramesPerSecond = "Output Frames/Second"
SourceFramesPerSecond = "Source Frames/Second"
)

View File

@@ -3,12 +3,14 @@
package remote_fx package remote_fx
import ( import (
"errors"
"fmt"
"log/slog" "log/slog"
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi" "github.com/prometheus-community/windows_exporter/internal/mi"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1" "github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types" "github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils" "github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@@ -28,6 +30,9 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
perfDataCollectorNetwork perfdata.Collector
perfDataCollectorGraphics perfdata.Collector
// net // net
baseTCPRTT *prometheus.Desc baseTCPRTT *prometheus.Desc
baseUDPRTT *prometheus.Desc baseUDPRTT *prometheus.Desc
@@ -74,14 +79,53 @@ func (c *Collector) GetName() string {
} }
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) { func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{"RemoteFX Network", "RemoteFX Graphics"}, nil return []string{}, nil
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
c.perfDataCollectorNetwork.Close()
c.perfDataCollectorGraphics.Close()
return nil return nil
} }
func (c *Collector) Build(*slog.Logger, *mi.Session) error { func (c *Collector) Build(*slog.Logger, *mi.Session) error {
var err error
c.perfDataCollectorNetwork, err = perfdata.NewCollector(perfdata.V2, "RemoteFX Network", perfdata.AllInstances, []string{
BaseTCPRTT,
BaseUDPRTT,
CurrentTCPBandwidth,
CurrentTCPRTT,
CurrentUDPBandwidth,
CurrentUDPRTT,
TotalReceivedBytes,
TotalSentBytes,
UDPPacketsReceivedPersec,
UDPPacketsSentPersec,
FECRate,
LossRate,
RetransmissionRate,
})
if err != nil {
return fmt.Errorf("failed to create RemoteFX Network collector: %w", err)
}
c.perfDataCollectorGraphics, err = perfdata.NewCollector(perfdata.V2, "RemoteFX Graphics", perfdata.AllInstances, []string{
AverageEncodingTime,
FrameQuality,
FramesSkippedPerSecondInsufficientClientResources,
FramesSkippedPerSecondInsufficientNetworkResources,
FramesSkippedPerSecondInsufficientServerResources,
GraphicsCompressionratio,
InputFramesPerSecond,
OutputFramesPerSecond,
SourceFramesPerSecond,
})
if err != nil {
return fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err)
}
// net // net
c.baseTCPRTT = prometheus.NewDesc( c.baseTCPRTT = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"), prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"),
@@ -211,228 +255,187 @@ func (c *Collector) Build(*slog.Logger, *mi.Session) error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name)) errs := make([]error, 0, 2)
if err := c.collectRemoteFXNetworkCount(ctx, logger, ch); err != nil {
logger.Error("failed collecting terminal services session count metrics",
slog.Any("err", err),
)
return err if err := c.collectRemoteFXNetworkCount(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting RemoteFX Network metrics: %w", err))
} }
if err := c.collectRemoteFXGraphicsCounters(ctx, logger, ch); err != nil { if err := c.collectRemoteFXGraphicsCounters(ch); err != nil {
logger.Error("failed collecting terminal services session count metrics", errs = append(errs, fmt.Errorf("failed collecting RemoteFX Graphics metrics: %w", err))
slog.Any("err", err),
)
return err
} }
return nil return errors.Join(errs...)
} }
type perflibRemoteFxNetwork struct { func (c *Collector) collectRemoteFXNetworkCount(ch chan<- prometheus.Metric) error {
Name string perfData, err := c.perfDataCollectorNetwork.Collect()
BaseTCPRTT float64 `perflib:"Base TCP RTT"`
BaseUDPRTT float64 `perflib:"Base UDP RTT"`
CurrentTCPBandwidth float64 `perflib:"Current TCP Bandwidth"`
CurrentTCPRTT float64 `perflib:"Current TCP RTT"`
CurrentUDPBandwidth float64 `perflib:"Current UDP Bandwidth"`
CurrentUDPRTT float64 `perflib:"Current UDP RTT"`
TotalReceivedBytes float64 `perflib:"Total Received Bytes"`
TotalSentBytes float64 `perflib:"Total Sent Bytes"`
UDPPacketsReceivedPersec float64 `perflib:"UDP Packets Received/sec"`
UDPPacketsSentPersec float64 `perflib:"UDP Packets Sent/sec"`
FECRate float64 `perflib:"Forward Error Correction (FEC) percentage"`
LossRate float64 `perflib:"Loss percentage"`
RetransmissionRate float64 `perflib:"Percentage of packets that have been retransmitted"`
}
func (c *Collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
dst := make([]perflibRemoteFxNetwork, 0)
err := v1.UnmarshalObject(ctx.PerfObjects["RemoteFX Network"], &dst, logger)
if err != nil { if err != nil {
return err return fmt.Errorf("failed to collect RemoteFX Network metrics: %w", err)
} }
for _, d := range dst { for name, data := range perfData {
// only connect metrics for remote named sessions // only connect metrics for remote named sessions
n := strings.ToLower(normalizeSessionName(d.Name)) sessionName := normalizeSessionName(name)
if n == "" || n == "services" || n == "console" { if n := strings.ToLower(sessionName); n == "" || n == "services" || n == "console" {
continue continue
} }
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.baseTCPRTT, c.baseTCPRTT,
prometheus.GaugeValue, prometheus.GaugeValue,
utils.MilliSecToSec(d.BaseTCPRTT), utils.MilliSecToSec(data[BaseTCPRTT].FirstValue),
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.baseUDPRTT, c.baseUDPRTT,
prometheus.GaugeValue, prometheus.GaugeValue,
utils.MilliSecToSec(d.BaseUDPRTT), utils.MilliSecToSec(data[BaseUDPRTT].FirstValue),
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.currentTCPBandwidth, c.currentTCPBandwidth,
prometheus.GaugeValue, prometheus.GaugeValue,
(d.CurrentTCPBandwidth*1000)/8, (data[CurrentTCPBandwidth].FirstValue*1000)/8,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.currentTCPRTT, c.currentTCPRTT,
prometheus.GaugeValue, prometheus.GaugeValue,
utils.MilliSecToSec(d.CurrentTCPRTT), utils.MilliSecToSec(data[CurrentTCPRTT].FirstValue),
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.currentUDPBandwidth, c.currentUDPBandwidth,
prometheus.GaugeValue, prometheus.GaugeValue,
(d.CurrentUDPBandwidth*1000)/8, (data[CurrentUDPBandwidth].FirstValue*1000)/8,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.currentUDPRTT, c.currentUDPRTT,
prometheus.GaugeValue, prometheus.GaugeValue,
utils.MilliSecToSec(d.CurrentUDPRTT), utils.MilliSecToSec(data[CurrentUDPRTT].FirstValue),
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.totalReceivedBytes, c.totalReceivedBytes,
prometheus.CounterValue, prometheus.CounterValue,
d.TotalReceivedBytes, data[TotalReceivedBytes].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.totalSentBytes, c.totalSentBytes,
prometheus.CounterValue, prometheus.CounterValue,
d.TotalSentBytes, data[TotalSentBytes].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.udpPacketsReceivedPerSec, c.udpPacketsReceivedPerSec,
prometheus.CounterValue, prometheus.CounterValue,
d.UDPPacketsReceivedPersec, data[UDPPacketsReceivedPersec].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.udpPacketsSentPerSec, c.udpPacketsSentPerSec,
prometheus.CounterValue, prometheus.CounterValue,
d.UDPPacketsSentPersec, data[UDPPacketsSentPersec].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.fecRate, c.fecRate,
prometheus.GaugeValue, prometheus.GaugeValue,
d.FECRate, data[FECRate].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.lossRate, c.lossRate,
prometheus.GaugeValue, prometheus.GaugeValue,
d.LossRate, data[LossRate].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.retransmissionRate, c.retransmissionRate,
prometheus.GaugeValue, prometheus.GaugeValue,
d.RetransmissionRate, data[RetransmissionRate].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
} }
return nil return nil
} }
type perflibRemoteFxGraphics struct { func (c *Collector) collectRemoteFXGraphicsCounters(ch chan<- prometheus.Metric) error {
Name string perfData, err := c.perfDataCollectorGraphics.Collect()
AverageEncodingTime float64 `perflib:"Average Encoding Time"`
FrameQuality float64 `perflib:"Frame Quality"`
FramesSkippedPerSecondInsufficientClientResources float64 `perflib:"Frames Skipped/Second - Insufficient Server Resources"`
FramesSkippedPerSecondInsufficientNetworkResources float64 `perflib:"Frames Skipped/Second - Insufficient Network Resources"`
FramesSkippedPerSecondInsufficientServerResources float64 `perflib:"Frames Skipped/Second - Insufficient Client Resources"`
GraphicsCompressionratio float64 `perflib:"Graphics Compression ratio"`
InputFramesPerSecond float64 `perflib:"Input Frames/Second"`
OutputFramesPerSecond float64 `perflib:"Output Frames/Second"`
SourceFramesPerSecond float64 `perflib:"Source Frames/Second"`
}
func (c *Collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
dst := make([]perflibRemoteFxGraphics, 0)
err := v1.UnmarshalObject(ctx.PerfObjects["RemoteFX Graphics"], &dst, logger)
if err != nil { if err != nil {
return err return fmt.Errorf("failed to collect RemoteFX Graphics metrics: %w", err)
} }
for _, d := range dst { for name, data := range perfData {
// only connect metrics for remote named sessions // only connect metrics for remote named sessions
n := strings.ToLower(normalizeSessionName(d.Name)) sessionName := normalizeSessionName(name)
if n == "" || n == "services" || n == "console" { if n := strings.ToLower(sessionName); n == "" || n == "services" || n == "console" {
continue continue
} }
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.averageEncodingTime, c.averageEncodingTime,
prometheus.GaugeValue, prometheus.GaugeValue,
utils.MilliSecToSec(d.AverageEncodingTime), utils.MilliSecToSec(data[AverageEncodingTime].FirstValue),
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.frameQuality, c.frameQuality,
prometheus.GaugeValue, prometheus.GaugeValue,
d.FrameQuality, data[FrameQuality].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.framesSkippedPerSecondInsufficientResources, c.framesSkippedPerSecondInsufficientResources,
prometheus.CounterValue, prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientClientResources, data[FramesSkippedPerSecondInsufficientClientResources].FirstValue,
normalizeSessionName(d.Name), sessionName,
"client", "client",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.framesSkippedPerSecondInsufficientResources, c.framesSkippedPerSecondInsufficientResources,
prometheus.CounterValue, prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientNetworkResources, data[FramesSkippedPerSecondInsufficientNetworkResources].FirstValue,
normalizeSessionName(d.Name), sessionName,
"network", "network",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.framesSkippedPerSecondInsufficientResources, c.framesSkippedPerSecondInsufficientResources,
prometheus.CounterValue, prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientServerResources, data[FramesSkippedPerSecondInsufficientServerResources].FirstValue,
normalizeSessionName(d.Name), sessionName,
"server", "server",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.graphicsCompressionRatio, c.graphicsCompressionRatio,
prometheus.GaugeValue, prometheus.GaugeValue,
d.GraphicsCompressionratio, data[GraphicsCompressionratio].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.inputFramesPerSecond, c.inputFramesPerSecond,
prometheus.CounterValue, prometheus.CounterValue,
d.InputFramesPerSecond, data[InputFramesPerSecond].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.outputFramesPerSecond, c.outputFramesPerSecond,
prometheus.CounterValue, prometheus.CounterValue,
d.OutputFramesPerSecond, data[OutputFramesPerSecond].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.sourceFramesPerSecond, c.sourceFramesPerSecond,
prometheus.CounterValue, prometheus.CounterValue,
d.SourceFramesPerSecond, data[SourceFramesPerSecond].FirstValue,
normalizeSessionName(d.Name), sessionName,
) )
} }

View File

@@ -165,10 +165,6 @@ func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan
logger = logger.With(slog.String("collector", Name)) logger = logger.With(slog.String("collector", Name))
if err := c.collect(logger, ch); err != nil { if err := c.collect(logger, ch); err != nil {
logger.Error("failed collecting API service metrics:",
slog.Any("err", err),
)
return fmt.Errorf("failed collecting API service metrics: %w", err) return fmt.Errorf("failed collecting API service metrics: %w", err)
} }

View File

@@ -1,11 +1,11 @@
package system package system
const ( const (
ContextSwitchesPersec = "Context Switches/sec" contextSwitchesPersec = "Context Switches/sec"
ExceptionDispatchesPersec = "Exception Dispatches/sec" exceptionDispatchesPersec = "Exception Dispatches/sec"
ProcessorQueueLength = "Processor Queue Length" processorQueueLength = "Processor Queue Length"
SystemCallsPersec = "System Calls/sec" systemCallsPersec = "System Calls/sec"
SystemUpTime = "System Up Time" systemUpTime = "System Up Time"
Processes = "Processes" processes = "Processes"
Threads = "Threads" threads = "Threads"
) )

View File

@@ -62,23 +62,23 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
c.perfDataCollector.Close()
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
counters := []string{
ContextSwitchesPersec,
ExceptionDispatchesPersec,
ProcessorQueueLength,
SystemCallsPersec,
SystemUpTime,
Processes,
Threads,
}
var err error var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "System", nil, counters) c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "System", nil, []string{
contextSwitchesPersec,
exceptionDispatchesPersec,
processorQueueLength,
systemCallsPersec,
systemUpTime,
processes,
threads,
})
if err != nil { if err != nil {
return fmt.Errorf("failed to create System collector: %w", err) return fmt.Errorf("failed to create System collector: %w", err)
} }
@@ -160,37 +160,37 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.contextSwitchesTotal, c.contextSwitchesTotal,
prometheus.CounterValue, prometheus.CounterValue,
data[ContextSwitchesPersec].FirstValue, data[contextSwitchesPersec].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.exceptionDispatchesTotal, c.exceptionDispatchesTotal,
prometheus.CounterValue, prometheus.CounterValue,
data[ExceptionDispatchesPersec].FirstValue, data[exceptionDispatchesPersec].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processorQueueLength, c.processorQueueLength,
prometheus.GaugeValue, prometheus.GaugeValue,
data[ProcessorQueueLength].FirstValue, data[processorQueueLength].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.processes, c.processes,
prometheus.GaugeValue, prometheus.GaugeValue,
data[Processes].FirstValue, data[processes].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.systemCallsTotal, c.systemCallsTotal,
prometheus.CounterValue, prometheus.CounterValue,
data[SystemCallsPersec].FirstValue, data[systemCallsPersec].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.systemUpTime, c.systemUpTime,
prometheus.GaugeValue, prometheus.GaugeValue,
data[SystemUpTime].FirstValue, data[systemUpTime].FirstValue,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.threads, c.threads,
prometheus.GaugeValue, prometheus.GaugeValue,
data[Threads].FirstValue, data[threads].FirstValue,
) )
// Windows has no defined limit, and is based off available resources. This currently isn't calculated by WMI and is set to default value. // Windows has no defined limit, and is based off available resources. This currently isn't calculated by WMI and is set to default value.

View File

@@ -101,14 +101,9 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
logger.Error("failed collecting thermalzone metrics", return fmt.Errorf("failed collecting thermalzone metrics: %w", err)
slog.Any("err", err),
)
return err
} }
return nil return nil

View File

@@ -115,18 +115,16 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
} }
} }
counters := []string{ var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Windows Time Service", nil, []string{
ClockFrequencyAdjustmentPPBTotal, ClockFrequencyAdjustmentPPBTotal,
ComputedTimeOffset, ComputedTimeOffset,
NTPClientTimeSourceCount, NTPClientTimeSourceCount,
NTPRoundTripDelay, NTPRoundTripDelay,
NTPServerIncomingRequestsTotal, NTPServerIncomingRequestsTotal,
NTPServerOutgoingResponsesTotal, NTPServerOutgoingResponsesTotal,
} })
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Windows Time Service", nil, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Windows Time Service collector: %w", err) return fmt.Errorf("failed to create Windows Time Service collector: %w", err)
} }

View File

@@ -3,6 +3,7 @@
package update package update
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"log/slog" "log/slog"
@@ -37,7 +38,8 @@ var ErrNoUpdates = errors.New("no updates available")
type Collector struct { type Collector struct {
config Config config Config
mu sync.RWMutex mu sync.RWMutex
ctxCancelFn context.CancelFunc
metricsBuf []prometheus.Metric metricsBuf []prometheus.Metric
@@ -77,6 +79,8 @@ func NewWithFlags(app *kingpin.Application) *Collector {
} }
func (c *Collector) Close(_ *slog.Logger) error { func (c *Collector) Close(_ *slog.Logger) error {
c.ctxCancelFn()
return nil return nil
} }
@@ -85,8 +89,12 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger.Info("update collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.") logger.Info("update collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.")
ctx, cancel := context.WithCancel(context.Background())
initErrCh := make(chan error, 1) initErrCh := make(chan error, 1)
go c.scheduleUpdateStatus(logger, initErrCh, c.config.online) go c.scheduleUpdateStatus(ctx, logger, initErrCh, c.config.online)
c.ctxCancelFn = cancel
if err := <-initErrCh; err != nil { if err := <-initErrCh; err != nil {
return fmt.Errorf("failed to initialize Windows Update collector: %w", err) return fmt.Errorf("failed to initialize Windows Update collector: %w", err)
@@ -137,7 +145,7 @@ func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- pr
return nil return nil
} }
func (c *Collector) scheduleUpdateStatus(logger *slog.Logger, initErrCh chan<- error, online bool) { func (c *Collector) scheduleUpdateStatus(ctx context.Context, logger *slog.Logger, initErrCh chan<- error, online bool) {
// The only way to run WMI queries in parallel while being thread-safe is to // The only way to run WMI queries in parallel while being thread-safe is to
// ensure the CoInitialize[Ex]() call is bound to its current OS thread. // ensure the CoInitialize[Ex]() call is bound to its current OS thread.
// Otherwise, attempting to initialize and run parallel queries across // Otherwise, attempting to initialize and run parallel queries across
@@ -226,10 +234,12 @@ func (c *Collector) scheduleUpdateStatus(logger *slog.Logger, initErrCh chan<- e
usd := us.ToIDispatch() usd := us.ToIDispatch()
defer usd.Release() defer usd.Release()
var metricsBuf []prometheus.Metric
for { for {
metricsBuf, err := c.fetchUpdates(logger, usd) metricsBuf, err = c.fetchUpdates(logger, usd)
if err != nil { if err != nil {
logger.Error("failed to fetch updates", logger.ErrorContext(ctx, "failed to fetch updates",
slog.Any("err", err), slog.Any("err", err),
) )
@@ -244,7 +254,11 @@ func (c *Collector) scheduleUpdateStatus(logger *slog.Logger, initErrCh chan<- e
c.metricsBuf = metricsBuf c.metricsBuf = metricsBuf
c.mu.Unlock() c.mu.Unlock()
time.Sleep(c.config.scrapeInterval) select {
case <-time.After(c.config.scrapeInterval):
case <-ctx.Done():
return
}
} }
} }

View File

@@ -82,7 +82,9 @@ func (c *Collector) Close(_ *slog.Logger) error {
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
counters := []string{ var err error
c.perfDataCollectorCPU, err = perfdata.NewCollector(perfdata.V2, "VM Processor", perftypes.TotalInstance, []string{
cpuLimitMHz, cpuLimitMHz,
cpuReservationMHz, cpuReservationMHz,
cpuShares, cpuShares,
@@ -90,11 +92,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
cpuTimePercents, cpuTimePercents,
couEffectiveVMSpeedMHz, couEffectiveVMSpeedMHz,
cpuHostProcessorSpeedMHz, cpuHostProcessorSpeedMHz,
} })
var err error
c.perfDataCollectorCPU, err = perfdata.NewCollector(perfdata.V2, "VM Processor", perftypes.TotalInstance, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create VM Processor collector: %w", err) return fmt.Errorf("failed to create VM Processor collector: %w", err)
} }
@@ -142,7 +140,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil, nil,
) )
counters = []string{ c.perfDataCollectorMemory, err = perfdata.NewCollector(perfdata.V2, "VM Memory", nil, []string{
memActiveMB, memActiveMB,
memBalloonedMB, memBalloonedMB,
memLimitMB, memLimitMB,
@@ -155,9 +153,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
memSwappedMB, memSwappedMB,
memTargetSizeMB, memTargetSizeMB,
memUsedMB, memUsedMB,
} })
c.perfDataCollectorMemory, err = perfdata.NewCollector(perfdata.V2, "VM Memory", nil, counters)
if err != nil { if err != nil {
return fmt.Errorf("failed to create VM Memory collector: %w", err) return fmt.Errorf("failed to create VM Memory collector: %w", err)
} }