Mirror of https://github.com/prometheus-community/windows_exporter.git (synced 2026-02-17 02:06:35 +00:00)
Merge branch 'features/sql-transactions-collector' of https://github.com/callvirtual/wmi_exporter into features/sql-transactions-collector
@@ -454,7 +454,7 @@ func NewADCollector() (Collector, error) {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *ADCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *ADCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Error("failed collecting ad metrics:", desc, err)
		return err

71  collector/collector.go  (new file)
@@ -0,0 +1,71 @@
package collector

import (
	"strconv"

	"github.com/leoluk/perflib_exporter/perflib"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"golang.org/x/sys/windows/registry"
)

// ...
const (
	// TODO: Make package-local
	Namespace = "wmi"

	// Conversion factors
	ticksToSecondsScaleFactor = 1 / 1e7
	windowsEpoch              = 116444736000000000
)

// getWindowsVersion reads the version number of the OS from the Registry
// See https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
func getWindowsVersion() float64 {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		log.Warn("Couldn't open registry", err)
		return 0
	}
	defer func() {
		err = k.Close()
		if err != nil {
			log.Warnf("Failed to close registry key: %v", err)
		}
	}()

	currentv, _, err := k.GetStringValue("CurrentVersion")
	if err != nil {
		log.Warn("Couldn't open registry to determine current Windows version:", err)
		return 0
	}

	currentv_flt, err := strconv.ParseFloat(currentv, 64)

	log.Debugf("Detected Windows version %f\n", currentv_flt)

	return currentv_flt
}

// Factories ...
var Factories = make(map[string]func() (Collector, error))

// Collector is the interface a collector has to implement.
type Collector interface {
	// Get new metrics and expose them via prometheus registry.
	Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (err error)
}

type ScrapeContext struct {
	perfObjects map[string]*perflib.PerfObject
}

// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape
func PrepareScrapeContext() (*ScrapeContext, error) {
	objs, err := getPerflibSnapshot()
	if err != nil {
		return nil, err
	}

	return &ScrapeContext{objs}, nil
}
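A minimal sketch (not part of this commit) of how a collector plugs into the Factories map and the new Collect signature above. The collector name "example" and its metric are hypothetical, chosen only to illustrate the interface; real collectors follow the same pattern with their own WMI or perflib data.

package collector

import "github.com/prometheus/client_golang/prometheus"

func init() {
	// Hypothetical registration: the exporter looks up enabled collectors by name.
	Factories["example"] = newExampleCollector
}

type exampleCollector struct {
	up *prometheus.Desc
}

func newExampleCollector() (Collector, error) {
	return &exampleCollector{
		up: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "example", "up"),
			"Illustrative gauge emitted by the hypothetical example collector.",
			nil, nil,
		),
	}, nil
}

// Collect satisfies the Collector interface introduced here: every collector
// now receives the shared ScrapeContext holding the per-scrape perflib snapshot.
func (c *exampleCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	ch <- prometheus.MustNewConstMetric(c.up, prometheus.GaugeValue, 1)
	return nil
}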
@@ -1,4 +1,4 @@
// +build windows
// +build windows,cgo

package collector

@@ -131,7 +131,7 @@ func NewContainerMetricsCollector() (Collector, error) {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *ContainerMetricsCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
		return err

472  collector/cpu.go
@@ -3,60 +3,75 @@
package collector

import (
	"strconv"
	"strings"

	"github.com/StackExchange/wmi"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"golang.org/x/sys/windows/registry"
)

func init() {
	Factories["cpu"] = NewCPUCollector
	Factories["cpu"] = newCPUCollector
}

//A function to get windows version from registry

func getWindowsVersion() float64 {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		log.Warn("Couldn't open registry", err)
		return 0
	}
	defer func() {
		err = k.Close()
		if err != nil {
			log.Warnf("Failed to close registry key: %v", err)
		}
	}()

	currentv, _, err := k.GetStringValue("CurrentVersion")
	if err != nil {
		log.Warn("Couldn't open registry to determine current Windows version:", err)
		return 0
	}

	currentv_flt, err := strconv.ParseFloat(currentv, 64)

	log.Debugf("Detected Windows version %f\n", currentv_flt)

	return currentv_flt
}
// A CPUCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfOS_Processor metrics
type CPUCollector struct {
type cpuCollectorBasic struct {
	CStateSecondsTotal *prometheus.Desc
	TimeTotal          *prometheus.Desc
	InterruptsTotal    *prometheus.Desc
	DPCsTotal          *prometheus.Desc
	ProcessorFrequency *prometheus.Desc
}
type cpuCollectorFull struct {
	CStateSecondsTotal       *prometheus.Desc
	TimeTotal                *prometheus.Desc
	InterruptsTotal          *prometheus.Desc
	DPCsTotal                *prometheus.Desc
	ClockInterruptsTotal     *prometheus.Desc
	IdleBreakEventsTotal     *prometheus.Desc
	ParkingStatus            *prometheus.Desc
	ProcessorFrequencyMHz    *prometheus.Desc
	ProcessorMaxFrequencyMHz *prometheus.Desc
	ProcessorPerformance     *prometheus.Desc
}

// NewCPUCollector constructs a new CPUCollector
func NewCPUCollector() (Collector, error) {
// newCPUCollector constructs a new cpuCollector, appropriate for the running OS
func newCPUCollector() (Collector, error) {
	const subsystem = "cpu"
	return &CPUCollector{

	version := getWindowsVersion()
	// For Windows 2008 (version 6.0) or earlier we only have the "Processor"
	// class. As of Windows 2008 R2 (version 6.1) the more detailed
	// "ProcessorInformation" set is available (although some of the counters
	// are added in later versions, so we aren't guaranteed to get all of
	// them).
	// Value 6.05 was selected to split between Windows versions.
	if version < 6.05 {
		return &cpuCollectorBasic{
			CStateSecondsTotal: prometheus.NewDesc(
				prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
				"Time spent in low-power idle state",
				[]string{"core", "state"},
				nil,
			),
			TimeTotal: prometheus.NewDesc(
				prometheus.BuildFQName(Namespace, subsystem, "time_total"),
				"Time that processor spent in different modes (idle, user, system, ...)",
				[]string{"core", "mode"},
				nil,
			),
			InterruptsTotal: prometheus.NewDesc(
				prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
				"Total number of received and serviced hardware interrupts",
				[]string{"core"},
				nil,
			),
			DPCsTotal: prometheus.NewDesc(
				prometheus.BuildFQName(Namespace, subsystem, "dpcs_total"),
				"Total number of received and serviced deferred procedure calls (DPCs)",
				[]string{"core"},
				nil,
			),
		}, nil
	}

	return &cpuCollectorFull{
		CStateSecondsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
			"Time spent in low-power idle state",
@@ -69,7 +84,6 @@ func NewCPUCollector() (Collector, error) {
			[]string{"core", "mode"},
			nil,
		),

		InterruptsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
			"Total number of received and serviced hardware interrupts",
@@ -82,241 +96,273 @@ func NewCPUCollector() (Collector, error) {
			[]string{"core"},
			nil,
		),
		ProcessorFrequency: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "core_frequence_mhz"),
		ClockInterruptsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "clock_interrupts_total"),
			"Total number of received and serviced clock tick interrupts",
			[]string{"core"},
			nil,
		),
		IdleBreakEventsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "idle_break_events_total"),
			"Total number of time processor was woken from idle",
			[]string{"core"},
			nil,
		),
		ParkingStatus: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "parking_status"),
			"Parking Status represents whether a processor is parked or not",
			[]string{"core"},
			nil,
		),
		ProcessorFrequencyMHz: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "core_frequency_mhz"),
			"Core frequency in megahertz",
			[]string{"core"},
			nil,
		),
		ProcessorPerformance: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "processor_performance"),
			"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
			[]string{"core"},
			nil,
		),
	}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *CPUCollector) Collect(ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Error("failed collecting cpu metrics:", desc, err)
type perflibProcessor struct {
	Name                  string
	C1Transitions         float64 `perflib:"C1 Transitions/sec"`
	C2Transitions         float64 `perflib:"C2 Transitions/sec"`
	C3Transitions         float64 `perflib:"C3 Transitions/sec"`
	DPCRate               float64 `perflib:"DPC Rate"`
	DPCsQueued            float64 `perflib:"DPCs Queued/sec"`
	Interrupts            float64 `perflib:"Interrupts/sec"`
	PercentC2Time         float64 `perflib:"% C1 Time"`
	PercentC3Time         float64 `perflib:"% C2 Time"`
	PercentC1Time         float64 `perflib:"% C3 Time"`
	PercentDPCTime        float64 `perflib:"% DPC Time"`
	PercentIdleTime       float64 `perflib:"% Idle Time"`
	PercentInterruptTime  float64 `perflib:"% Interrupt Time"`
	PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
	PercentProcessorTime  float64 `perflib:"% Processor Time"`
	PercentUserTime       float64 `perflib:"% User Time"`
}

func (c *cpuCollectorBasic) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	data := make([]perflibProcessor, 0)
	err := unmarshalObject(ctx.perfObjects["Processor"], &data)
	if err != nil {
		return err
	}
	return nil
}
|
||||
// Win32_PerfRawData_PerfOS_Processor docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx
|
||||
type Win32_PerfRawData_PerfOS_Processor struct {
|
||||
Name string
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
DPCRate uint32
|
||||
DPCsQueuedPersec uint32
|
||||
InterruptsPersec uint32
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentUserTime uint64
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_Counters_ProcessorInformation struct {
|
||||
Name string
|
||||
AverageIdleTime uint64
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
ClockInterruptsPersec uint64
|
||||
DPCRate uint64
|
||||
DPCsQueuedPersec uint64
|
||||
IdleBreakEventsPersec uint64
|
||||
InterruptsPersec uint64
|
||||
ParkingStatus uint64
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentofMaximumFrequency uint64
|
||||
PercentPerformanceLimit uint64
|
||||
PercentPriorityTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentPrivilegedUtility uint64
|
||||
PercentProcessorPerformance uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentProcessorUtility uint64
|
||||
PercentUserTime uint64
|
||||
PerformanceLimitFlags uint64
|
||||
ProcessorFrequency uint64
|
||||
ProcessorStateFlags uint64
|
||||
}
|
||||
|
||||
func (c *CPUCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
version := getWindowsVersion()
|
||||
// Windows version by number https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
|
||||
// For Windows 2008 or earlier Windows version is 6.0 or lower, so we use Win32_PerfRawData_PerfOS_Processor class
|
||||
// For Windows 2008 R2 or later Windows version is 6.1 or higher, so we use Win32_PerfRawData_Counters_ProcessorInformation
|
||||
// Value 6.05 was selected just to split between WIndows versions
|
||||
if version > 6.05 {
|
||||
var dst []Win32_PerfRawData_Counters_ProcessorInformation
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, data := range dst {
|
||||
if strings.Contains(data.Name, "_Total") {
|
||||
continue
|
||||
}
|
||||
core := data.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequency,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.ProcessorFrequency),
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentC1Time)*ticksToSecondsScaleFactor,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentC2Time)*ticksToSecondsScaleFactor,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentC3Time)*ticksToSecondsScaleFactor,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentIdleTime)*ticksToSecondsScaleFactor,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentInterruptTime)*ticksToSecondsScaleFactor,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentDPCTime)*ticksToSecondsScaleFactor,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentPrivilegedTime)*ticksToSecondsScaleFactor,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentUserTime)*ticksToSecondsScaleFactor,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.InterruptsPersec),
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.DPCsQueuedPersec),
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
|
||||
}
|
||||
var dst []Win32_PerfRawData_PerfOS_Processor
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, data := range dst {
|
||||
if strings.Contains(data.Name, "_Total") {
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
core := data.Name
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentC1Time)*ticksToSecondsScaleFactor,
|
||||
cpu.PercentC1Time,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentC2Time)*ticksToSecondsScaleFactor,
|
||||
cpu.PercentC2Time,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentC3Time)*ticksToSecondsScaleFactor,
|
||||
cpu.PercentC3Time,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentIdleTime)*ticksToSecondsScaleFactor,
|
||||
cpu.PercentIdleTime,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentInterruptTime)*ticksToSecondsScaleFactor,
|
||||
cpu.PercentInterruptTime,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentDPCTime)*ticksToSecondsScaleFactor,
|
||||
cpu.PercentDPCTime,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentPrivilegedTime)*ticksToSecondsScaleFactor,
|
||||
cpu.PercentPrivilegedTime,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.PercentUserTime)*ticksToSecondsScaleFactor,
|
||||
cpu.PercentUserTime,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.InterruptsPersec),
|
||||
cpu.Interrupts,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.DPCsQueuedPersec),
|
||||
cpu.DPCsQueued,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
return nil
|
||||
}
type perflibProcessorInformation struct {
	Name                     string
	C1TimeSeconds            float64 `perflib:"% C1 Time"`
	C2TimeSeconds            float64 `perflib:"% C2 Time"`
	C3TimeSeconds            float64 `perflib:"% C3 Time"`
	C1TransitionsTotal       float64 `perflib:"C1 Transitions/sec"`
	C2TransitionsTotal       float64 `perflib:"C2 Transitions/sec"`
	C3TransitionsTotal       float64 `perflib:"C3 Transitions/sec"`
	ClockInterruptsTotal     float64 `perflib:"Clock Interrupts/sec"`
	DPCsQueuedTotal          float64 `perflib:"DPCs Queued/sec"`
	DPCTimeSeconds           float64 `perflib:"% DPC Time"`
	IdleBreakEventsTotal     float64 `perflib:"Idle Break Events/sec"`
	IdleTimeSeconds          float64 `perflib:"% Idle Time"`
	InterruptsTotal          float64 `perflib:"Interrupts/sec"`
	InterruptTimeSeconds     float64 `perflib:"% Interrupt Time"`
	ParkingStatus            float64 `perflib:"Parking Status"`
	PerformanceLimitPercent  float64 `perflib:"% Performance Limit"`
	PriorityTimeSeconds      float64 `perflib:"% Priority Time"`
	PrivilegedTimeSeconds    float64 `perflib:"% Privileged Time"`
	PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
	ProcessorFrequencyMHz    float64 `perflib:"Processor Frequency"`
	ProcessorPerformance     float64 `perflib:"% Processor Performance"`
	ProcessorTimeSeconds     float64 `perflib:"% Processor Time"`
	ProcessorUtilityRate     float64 `perflib:"% Processor Utility"`
	UserTimeSeconds          float64 `perflib:"% User Time"`
}

func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	data := make([]perflibProcessorInformation, 0)
	err := unmarshalObject(ctx.perfObjects["Processor Information"], &data)
	if err != nil {
		return err
	}

	for _, cpu := range data {
		if strings.Contains(strings.ToLower(cpu.Name), "_total") {
			continue
		}
		core := cpu.Name

		ch <- prometheus.MustNewConstMetric(
			c.CStateSecondsTotal,
			prometheus.CounterValue,
			cpu.C1TimeSeconds,
			core, "c1",
		)
		ch <- prometheus.MustNewConstMetric(
			c.CStateSecondsTotal,
			prometheus.CounterValue,
			cpu.C2TimeSeconds,
			core, "c2",
		)
		ch <- prometheus.MustNewConstMetric(
			c.CStateSecondsTotal,
			prometheus.CounterValue,
			cpu.C3TimeSeconds,
			core, "c3",
		)

		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.CounterValue,
			cpu.IdleTimeSeconds,
			core, "idle",
		)
		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.CounterValue,
			cpu.InterruptTimeSeconds,
			core, "interrupt",
		)
		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.CounterValue,
			cpu.DPCTimeSeconds,
			core, "dpc",
		)
		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.CounterValue,
			cpu.PrivilegedTimeSeconds,
			core, "privileged",
		)
		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.CounterValue,
			cpu.UserTimeSeconds,
			core, "user",
		)

		ch <- prometheus.MustNewConstMetric(
			c.InterruptsTotal,
			prometheus.CounterValue,
			cpu.InterruptsTotal,
			core,
		)
		ch <- prometheus.MustNewConstMetric(
			c.DPCsTotal,
			prometheus.CounterValue,
			cpu.DPCsQueuedTotal,
			core,
		)
		ch <- prometheus.MustNewConstMetric(
			c.ClockInterruptsTotal,
			prometheus.CounterValue,
			cpu.ClockInterruptsTotal,
			core,
		)
		ch <- prometheus.MustNewConstMetric(
			c.IdleBreakEventsTotal,
			prometheus.CounterValue,
			cpu.IdleBreakEventsTotal,
			core,
		)

		ch <- prometheus.MustNewConstMetric(
			c.ParkingStatus,
			prometheus.GaugeValue,
			cpu.ParkingStatus,
			core,
		)

		ch <- prometheus.MustNewConstMetric(
			c.ProcessorFrequencyMHz,
			prometheus.GaugeValue,
			cpu.ProcessorFrequencyMHz,
			core,
		)
		ch <- prometheus.MustNewConstMetric(
			c.ProcessorPerformance,
			prometheus.GaugeValue,
			cpu.ProcessorPerformance,
			core,
		)
	}

	return nil
}
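The *TimeSeconds fields above depend on the counter-type conversion added in collector/perflib.go later in this diff: percent-time perflib counters are cumulative 100 ns tick counts, so multiplying by ticksToSecondsScaleFactor (1/1e7) yields seconds, which Prometheus can turn into per-core utilisation with rate() (e.g. something like rate(wmi_cpu_time_total[5m]), given the Namespace "wmi" and subsystem "cpu" used above). A small illustrative calculation, not part of the commit:

package main

import "fmt"

// Illustrative only: perflib "% ... Time" counters are raw 100 ns ticks.
const ticksToSecondsScaleFactor = 1 / 1e7

func main() {
	rawIdleTicks := 150000000.0 // hypothetical value read from "% Idle Time"
	fmt.Println(rawIdleTicks * ticksToSecondsScaleFactor) // 15 seconds of idle time
}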
@@ -42,7 +42,7 @@ func NewCSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting cs metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -181,7 +181,7 @@ func NewDNSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *DNSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting dns metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -597,7 +597,7 @@ func NewHyperVCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *HyperVCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *HyperVCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectVmHealth(ch); err != nil {
|
||||
log.Error("failed collecting hyperV health status metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -818,7 +818,7 @@ func NewIISCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *IISCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *IISCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting iis metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -134,7 +134,7 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogicalDiskCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting logical_disk metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -256,7 +256,7 @@ func NewMemoryCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MemoryCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting memory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -68,7 +68,7 @@ func NewMSMQCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting msmq metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1795,7 +1795,7 @@ func (c *MSSQLCollector) execute(name string, fn mssqlCollectorFunc, ch chan<- p
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MSSQLCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *MSSQLCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
enabled := mssqlExpandEnabledCollectors(*mssqlEnabledCollectors)
|
||||
|
||||
@@ -132,7 +132,7 @@ func NewNetworkCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NetworkCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting net metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRExceptionsCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -46,7 +46,7 @@ func NewNETFramework_NETCLRInteropCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrinterop metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRJitCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrjit metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -88,7 +88,7 @@ func NewNETFramework_NETCLRLoadingCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrloading metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -74,7 +74,7 @@ func NewNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -112,7 +112,7 @@ func NewNETFramework_NETCLRMemoryCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrmemory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -67,7 +67,7 @@ func NewNETFramework_NETCLRRemotingCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrremoting metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRSecurityCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -113,7 +113,7 @@ func NewOSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting os metrics:", desc, err)
|
||||
return err
|
||||
|
||||
100  collector/perflib.go  (new file)
@@ -0,0 +1,100 @@
package collector

import (
	"fmt"
	"reflect"

	perflibCollector "github.com/leoluk/perflib_exporter/collector"
	"github.com/leoluk/perflib_exporter/perflib"
	"github.com/prometheus/common/log"
)

func getPerflibSnapshot() (map[string]*perflib.PerfObject, error) {
	objects, err := perflib.QueryPerformanceData("Global")
	if err != nil {
		return nil, err
	}

	indexed := make(map[string]*perflib.PerfObject)
	for _, obj := range objects {
		indexed[obj.Name] = obj
	}
	return indexed, nil
}

func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
	if obj == nil {
		return fmt.Errorf("counter not found")
	}
	rv := reflect.ValueOf(vs)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return fmt.Errorf("%v is nil or not a pointer to slice", reflect.TypeOf(vs))
	}
	ev := rv.Elem()
	if ev.Kind() != reflect.Slice {
		return fmt.Errorf("%v is not slice", reflect.TypeOf(vs))
	}

	// Ensure sufficient length
	if ev.Cap() < len(obj.Instances) {
		nvs := reflect.MakeSlice(ev.Type(), len(obj.Instances), len(obj.Instances))
		ev.Set(nvs)
	}

	for idx, instance := range obj.Instances {
		target := ev.Index(idx)
		rt := target.Type()

		counters := make(map[string]*perflib.PerfCounter, len(instance.Counters))
		for _, ctr := range instance.Counters {
			if ctr.Def.IsBaseValue && !ctr.Def.IsNanosecondCounter {
				counters[ctr.Def.Name+"_Base"] = ctr
			} else {
				counters[ctr.Def.Name] = ctr
			}
		}

		for i := 0; i < target.NumField(); i++ {
			f := rt.Field(i)
			tag := f.Tag.Get("perflib")
			if tag == "" {
				continue
			}

			ctr, found := counters[tag]
			if !found {
				log.Debugf("missing counter %q, have %v", tag, counterMapKeys(counters))
				continue
			}
			if !target.Field(i).CanSet() {
				return fmt.Errorf("tagged field %v cannot be written to", f.Name)
			}
			if fieldType := target.Field(i).Type(); fieldType != reflect.TypeOf((*float64)(nil)).Elem() {
				return fmt.Errorf("tagged field %v has wrong type %v, must be float64", f.Name, fieldType)
			}

			switch ctr.Def.CounterType {
			case perflibCollector.PERF_ELAPSED_TIME:
				target.Field(i).SetFloat(float64(ctr.Value-windowsEpoch) / float64(obj.Frequency))
			case perflibCollector.PERF_100NSEC_TIMER, perflibCollector.PERF_PRECISION_100NS_TIMER:
				target.Field(i).SetFloat(float64(ctr.Value) * ticksToSecondsScaleFactor)
			default:
				target.Field(i).SetFloat(float64(ctr.Value))
			}
		}

		if instance.Name != "" && target.FieldByName("Name").CanSet() {
			target.FieldByName("Name").SetString(instance.Name)
		}
	}

	return nil
}

func counterMapKeys(m map[string]*perflib.PerfCounter) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}
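A minimal sketch (not part of the commit) of how a collector is expected to consume this machinery: declare a struct whose fields carry `perflib:"..."` tags matching the counter names, then hand a pointer-to-slice and the snapshot entry to unmarshalObject. The "System" object and its counter names below are assumptions used only for illustration.

type perflibSystem struct {
	Name            string
	ContextSwitches float64 `perflib:"Context Switches/sec"`
	SystemUpTime    float64 `perflib:"System Up Time"`
}

func collectSystem(ctx *ScrapeContext) ([]perflibSystem, error) {
	data := make([]perflibSystem, 0)
	// ctx.perfObjects was filled once per scrape by PrepareScrapeContext.
	if err := unmarshalObject(ctx.perfObjects["System"], &data); err != nil {
		return nil, err
	}
	return data, nil
}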
125
collector/perflib_test.go
Normal file
125
collector/perflib_test.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
perflibCollector "github.com/leoluk/perflib_exporter/collector"
|
||||
"github.com/leoluk/perflib_exporter/perflib"
|
||||
)
|
||||
|
||||
type simple struct {
|
||||
ValA float64 `perflib:"Something"`
|
||||
ValB float64 `perflib:"Something Else"`
|
||||
}
|
||||
|
||||
func TestUnmarshalPerflib(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
obj *perflib.PerfObject
|
||||
|
||||
expectedOutput []simple
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "nil check",
|
||||
obj: nil,
|
||||
expectedOutput: []simple{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "Simple",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 123,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 123}},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Multiple properties",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 123,
|
||||
},
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something Else",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 256,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 123, ValB: 256}},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Multiple instances",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 321,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 231,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 321}, {ValA: 231}},
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
output := make([]simple, 0)
|
||||
err := unmarshalObject(c.obj, &output)
|
||||
if err != nil && !c.expectError {
|
||||
t.Errorf("Did not expect error, got %q", err)
|
||||
}
|
||||
if err == nil && c.expectError {
|
||||
t.Errorf("Expected an error, but got ok")
|
||||
}
|
||||
|
||||
if err == nil && !reflect.DeepEqual(output, c.expectedOutput) {
|
||||
t.Errorf("Output mismatch, expected %+v, got %+v", c.expectedOutput, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -135,7 +135,7 @@ func NewProcessCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ProcessCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *ProcessCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting process metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -64,7 +64,7 @@ func NewserviceCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *serviceCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting service metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -69,7 +69,7 @@ func NewSystemCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *SystemCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting system metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -90,7 +90,7 @@ func NewTCPCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *TCPCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *TCPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting tcp metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -212,7 +212,7 @@ func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
// Update implements the Collector interface.
|
||||
func (c *textFileCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *textFileCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
error := 0.0
|
||||
mtimes := map[string]time.Time{}
|
||||
|
||||
|
||||
@@ -10,8 +10,8 @@ func init() {
|
||||
Factories["thermalzone"] = NewThermalZoneCollector
|
||||
}
|
||||
|
||||
// A ThermalZoneCollector is a Prometheus collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics
|
||||
type ThermalZoneCollector struct {
|
||||
// A thermalZoneCollector is a Prometheus collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics
|
||||
type thermalZoneCollector struct {
|
||||
PercentPassiveLimit *prometheus.Desc
|
||||
Temperature *prometheus.Desc
|
||||
ThrottleReasons *prometheus.Desc
|
||||
@@ -20,7 +20,7 @@ type ThermalZoneCollector struct {
|
||||
// NewThermalZoneCollector ...
|
||||
func NewThermalZoneCollector() (Collector, error) {
|
||||
const subsystem = "thermalzone"
|
||||
return &ThermalZoneCollector{
|
||||
return &thermalZoneCollector{
|
||||
Temperature: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "temperature_celsius"),
|
||||
"(Temperature)",
|
||||
@@ -50,7 +50,7 @@ func NewThermalZoneCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ThermalZoneCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *thermalZoneCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting thermalzone metrics:", desc, err)
|
||||
return err
|
||||
@@ -68,7 +68,7 @@ type Win32_PerfRawData_Counters_ThermalZoneInformation struct {
|
||||
ThrottleReasons uint32
|
||||
}
|
||||
|
||||
func (c *ThermalZoneCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
func (c *thermalZoneCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_Counters_ThermalZoneInformation
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
|
||||
@@ -162,7 +162,7 @@ func NewVmwareCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *VmwareCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *VmwareCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectMem(ch); err != nil {
|
||||
log.Error("failed collecting vmware memory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -4,27 +4,9 @@ import (
	"bytes"
	"reflect"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
)

// ...
const (
	Namespace = "wmi"

	// Conversion factors
	ticksToSecondsScaleFactor = 1 / 1e7
)

// Factories ...
var Factories = make(map[string]func() (Collector, error))

// Collector is the interface a collector has to implement.
type Collector interface {
	// Get new metrics and expose them via prometheus registry.
	Collect(ch chan<- prometheus.Metric) (err error)
}

func className(src interface{}) string {
	s := reflect.Indirect(reflect.ValueOf(src))
	t := s.Type()