switch to go-kit logger

Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Jan-Otto Kröpke
2023-04-22 12:17:51 +02:00
parent e0e31254e2
commit 8509bc69a6
70 changed files with 1052 additions and 993 deletions
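
For orientation, a minimal standalone sketch (not part of the commit) of the logging pattern this change applies across the collectors: the shared windows_exporter log package is dropped in favour of go-kit's structured logger, each collector keeps a logger tagged with its subsystem via log.With, and messages are emitted as key/value pairs through the level helpers.

package main

import (
	"errors"
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	// Base go-kit logger writing logfmt to stderr.
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	// Per-collector logger, mirroring log.With(logger, "collector", subsystem) in the diff.
	adLogger := log.With(logger, "collector", "ad")

	// Old style (removed):  log.Error("failed collecting ad metrics:", desc, err)
	// New style (added):
	err := errors.New("example failure")
	level.Error(adLogger).Log("msg", "failed collecting ad metrics", "err", err)
}

With logfmt output this renders roughly as: collector=ad level=error msg="failed collecting ad metrics" err="example failure".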

View File

@@ -6,13 +6,16 @@ package collector
import (
"errors"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A ADCollector is a Prometheus collector for WMI Win32_PerfRawData_DirectoryServices_DirectoryServices metrics
type ADCollector struct {
logger log.Logger
AddressBookOperationsTotal *prometheus.Desc
AddressBookClientSessions *prometheus.Desc
ApproximateHighestDistinguishedNameTag *prometheus.Desc
@@ -77,9 +80,11 @@ type ADCollector struct {
}
// newADCollector ...
func newADCollector() (Collector, error) {
func newADCollector(logger log.Logger) (Collector, error) {
const subsystem = "ad"
return &ADCollector{
logger: log.With(logger, "collector", subsystem),
AddressBookOperationsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "address_book_operations_total"),
"",
@@ -453,7 +458,7 @@ func newADCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *ADCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting ad metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting ad metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -613,7 +618,7 @@ type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
func (c *ADCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_DirectoryServices_DirectoryServices
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
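
The constructor and Collect changes repeated in the files below all follow one shape. As a consolidated, hedged in-package sketch (the collector, metric, and messages here are illustrative, not a real collector from the commit; Collector, ScrapeContext and Namespace are the package's existing types and constant):

package collector

import (
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
)

// exampleCollector is illustrative only; real collectors in this commit follow the same shape.
type exampleCollector struct {
	logger  log.Logger
	Example *prometheus.Desc
}

func newExampleCollector(logger log.Logger) (Collector, error) {
	const subsystem = "example"
	return &exampleCollector{
		// Constructors now receive the logger and store it tagged with their subsystem.
		logger: log.With(logger, "collector", subsystem),
		Example: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "example_total"),
			"Illustrative metric description",
			nil,
			nil,
		),
	}, nil
}

func (c *exampleCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		// Errors are logged through the stored per-collector logger.
		level.Error(c.logger).Log("msg", "failed collecting example metrics", "desc", desc, "err", err)
		return err
	}
	return nil
}

func (c *exampleCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	ch <- prometheus.MustNewConstMetric(c.Example, prometheus.CounterValue, 1)
	return nil, nil
}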

View File

@@ -5,12 +5,16 @@ package collector
import (
"errors"
"github.com/prometheus-community/windows_exporter/log"
"github.com/prometheus/client_golang/prometheus"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
type adcsCollector struct {
logger log.Logger
RequestsPerSecond *prometheus.Desc
RequestProcessingTime *prometheus.Desc
RetrievalsPerSecond *prometheus.Desc
@@ -27,9 +31,11 @@ type adcsCollector struct {
}
// ADCSCollectorMethod ...
func adcsCollectorMethod() (Collector, error) {
func adcsCollectorMethod(logger log.Logger) (Collector, error) {
const subsystem = "adcs"
return &adcsCollector{
logger: log.With(logger, "collector", subsystem),
RequestsPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "requests_total"),
"Total certificate requests processed",
@@ -113,7 +119,7 @@ func adcsCollectorMethod() (Collector, error) {
func (c *adcsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectADCSCounters(ctx, ch); err != nil {
log.Error("Failed collecting ADCS Metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting ADCS metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -141,7 +147,7 @@ func (c *adcsCollector) collectADCSCounters(ctx *ScrapeContext, ch chan<- promet
if _, ok := ctx.perfObjects["Certification Authority"]; !ok {
return nil, errors.New("Perflib did not contain an entry for Certification Authority")
}
err := unmarshalObject(ctx.perfObjects["Certification Authority"], &dst)
err := unmarshalObject(ctx.perfObjects["Certification Authority"], &dst, c.logger)
if err != nil {
return nil, err
}

View File

@@ -4,11 +4,15 @@
package collector
import (
"github.com/prometheus/client_golang/prometheus"
"math"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type adfsCollector struct {
logger log.Logger
adLoginConnectionFailures *prometheus.Desc
certificateAuthentications *prometheus.Desc
deviceAuthentications *prometheus.Desc
@@ -55,10 +59,12 @@ type adfsCollector struct {
}
// newADFSCollector constructs a new adfsCollector
func newADFSCollector() (Collector, error) {
func newADFSCollector(logger log.Logger) (Collector, error) {
const subsystem = "adfs"
return &adfsCollector{
logger: log.With(logger, "collector", subsystem),
adLoginConnectionFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "ad_login_connection_failures_total"),
"Total number of connection failures to an Active Directory domain controller",
@@ -368,7 +374,7 @@ type perflibADFS struct {
func (c *adfsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var adfsData []perflibADFS
err := unmarshalObject(ctx.perfObjects["AD FS"], &adfsData)
err := unmarshalObject(ctx.perfObjects["AD FS"], &adfsData, c.logger)
if err != nil {
return err
}

View File

@@ -4,12 +4,15 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
// A CacheCollector is a Prometheus collector for Perflib Cache metrics
type CacheCollector struct {
logger log.Logger
AsyncCopyReadsTotal *prometheus.Desc
AsyncDataMapsTotal *prometheus.Desc
AsyncFastReadsTotal *prometheus.Desc
@@ -42,9 +45,11 @@ type CacheCollector struct {
}
// NewCacheCollector ...
func newCacheCollector() (Collector, error) {
func newCacheCollector(logger log.Logger) (Collector, error) {
const subsystem = "cache"
return &CacheCollector{
logger: log.With(logger, "collector", subsystem),
AsyncCopyReadsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "async_copy_reads_total"),
"(AsyncCopyReadsTotal)",
@@ -225,7 +230,7 @@ func newCacheCollector() (Collector, error) {
// Collect implements the Collector interface
func (c *CacheCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
log.Error("failed collecting cache metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting cache metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -267,7 +272,7 @@ type perflibCache struct {
func (c *CacheCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []perflibCache // Single-instance class, array is required but will have single entry.
if err := unmarshalObject(ctx.perfObjects["Cache"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["Cache"], &dst, c.logger); err != nil {
return nil, err
}

View File

@@ -7,8 +7,9 @@ import (
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/leoluk/perflib_exporter/perflib"
"github.com/prometheus-community/windows_exporter/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows/registry"
)
@@ -25,35 +26,35 @@ const (
// getWindowsVersion reads the version number of the OS from the Registry
// See https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
func getWindowsVersion() float64 {
func getWindowsVersion(logger log.Logger) float64 {
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
if err != nil {
log.Warn("Couldn't open registry", err)
level.Warn(logger).Log("msg", "Couldn't open registry", "err", err)
return 0
}
defer func() {
err = k.Close()
if err != nil {
log.Warnf("Failed to close registry key: %v", err)
level.Warn(logger).Log("msg", "Failed to close registry key", "err", err)
}
}()
currentv, _, err := k.GetStringValue("CurrentVersion")
if err != nil {
log.Warn("Couldn't open registry to determine current Windows version:", err)
level.Warn(logger).Log("msg", "Couldn't open registry to determine current Windows version", "err", err)
return 0
}
currentv_flt, err := strconv.ParseFloat(currentv, 64)
log.Debugf("Detected Windows version %f\n", currentv_flt)
level.Debug(logger).Log("msg", fmt.Sprintf("Detected Windows version %f\n", currentv_flt))
return currentv_flt
}
type collectorBuilder func() (Collector, error)
type collectorBuilder func(log.Logger) (Collector, error)
type flagsBuilder func(*kingpin.Application)
type perfCounterNamesBuilder func() []string
type perfCounterNamesBuilder func(log.Logger) []string
var (
builders = make(map[string]collectorBuilder)
@@ -80,12 +81,12 @@ func Available() []string {
}
return cs
}
func Build(collector string) (Collector, error) {
func Build(collector string, logger log.Logger) (Collector, error) {
builder, exists := builders[collector]
if !exists {
return nil, fmt.Errorf("Unknown collector %q", collector)
}
return builder()
return builder(logger)
}
func getPerfQuery(collectors []string) string {
parts := make([]string, 0, len(collectors))

View File

@@ -4,6 +4,7 @@ import (
"reflect"
"testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@@ -35,14 +36,14 @@ func TestExpandChildCollectors(t *testing.T) {
}
}
func benchmarkCollector(b *testing.B, name string, collectFunc func() (Collector, error)) {
func benchmarkCollector(b *testing.B, name string, collectFunc func(logger log.Logger) (Collector, error)) {
// Create perflib scrape context. Some perflib collectors required a correct context,
// or will fail during benchmark.
scrapeContext, err := PrepareScrapeContext([]string{name})
if err != nil {
b.Error(err)
}
c, err := collectFunc()
c, err := collectFunc(log.NewNopLogger())
if err != nil {
b.Error(err)
}
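
Tests and benchmarks now hand constructors go-kit's no-op logger, as the benchmark helper above does. A minimal illustrative unit test built on the same idea (the test itself is not part of the commit; newADCollector's new signature is taken from the first hunk):

package collector

import (
	"testing"

	"github.com/go-kit/log"
)

// Illustrative only: any constructor that now takes a logger can be exercised
// with log.NewNopLogger(), exactly as benchmarkCollector does above.
func TestNewADCollectorAcceptsLogger(t *testing.T) {
	if _, err := newADCollector(log.NewNopLogger()); err != nil {
		t.Fatal(err)
	}
}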

View File

@@ -5,12 +5,15 @@ package collector
import (
"github.com/Microsoft/hcsshim"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
// A ContainerMetricsCollector is a Prometheus collector for containers metrics
type ContainerMetricsCollector struct {
logger log.Logger
// Presence
ContainerAvailable *prometheus.Desc
@@ -42,9 +45,11 @@ type ContainerMetricsCollector struct {
}
// newContainerMetricsCollector constructs a new ContainerMetricsCollector
func newContainerMetricsCollector() (Collector, error) {
func newContainerMetricsCollector(logger log.Logger) (Collector, error) {
const subsystem = "container"
return &ContainerMetricsCollector{
logger: log.With(logger, "collector", subsystem),
ContainerAvailable: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "available"),
"Available",
@@ -160,17 +165,17 @@ func newContainerMetricsCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting ContainerMetricsCollector metrics", "desc", desc, "err", err)
return err
}
return nil
}
// containerClose closes the container resource
func containerClose(c hcsshim.Container) {
err := c.Close()
func (c *ContainerMetricsCollector) containerClose(container hcsshim.Container) {
err := container.Close()
if err != nil {
log.Error(err)
level.Error(c.logger).Log("err", err)
}
}
@@ -179,7 +184,7 @@ func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prome
// Types Container is passed to get the containers compute systems only
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
if err != nil {
log.Error("Err in Getting containers:", err)
level.Error(c.logger).Log("msg", "Err in Getting containers", "err", err)
return nil, err
}
@@ -197,16 +202,16 @@ func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prome
for _, containerDetails := range containers {
container, err := hcsshim.OpenContainer(containerDetails.ID)
if container != nil {
defer containerClose(container)
defer c.containerClose(container)
}
if err != nil {
log.Error("err in opening container: ", containerDetails.ID, err)
level.Error(c.logger).Log("msg", "err in opening container", "containerId", containerDetails.ID, "err", err)
continue
}
cstats, err := container.Statistics()
if err != nil {
log.Error("err in fetching container Statistics: ", containerDetails.ID, err)
level.Error(c.logger).Log("msg", "err in fetching container Statistics", "containerId", containerDetails.ID, "err", err)
continue
}
containerIdWithPrefix := getContainerIdWithPrefix(containerDetails)
@@ -255,7 +260,7 @@ func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prome
)
if len(cstats.Network) == 0 {
log.Info("No Network Stats for container: ", containerDetails.ID)
level.Info(c.logger).Log("msg", "No Network Stats for container", "containerId", containerDetails.ID)
continue
}

View File

@@ -6,16 +6,21 @@ package collector
import (
"strings"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type cpuCollectorBasic struct {
logger log.Logger
CStateSecondsTotal *prometheus.Desc
TimeTotal *prometheus.Desc
InterruptsTotal *prometheus.Desc
DPCsTotal *prometheus.Desc
}
type cpuCollectorFull struct {
logger log.Logger
CStateSecondsTotal *prometheus.Desc
TimeTotal *prometheus.Desc
InterruptsTotal *prometheus.Desc
@@ -33,10 +38,11 @@ type cpuCollectorFull struct {
}
// newCPUCollector constructs a new cpuCollector, appropriate for the running OS
func newCPUCollector() (Collector, error) {
func newCPUCollector(logger log.Logger) (Collector, error) {
const subsystem = "cpu"
logger = log.With(logger, "collector", subsystem)
version := getWindowsVersion()
version := getWindowsVersion(logger)
// For Windows 2008 (version 6.0) or earlier we only have the "Processor"
// class. As of Windows 2008 R2 (version 6.1) the more detailed
// "Processor Information" set is available (although some of the counters
@@ -45,6 +51,7 @@ func newCPUCollector() (Collector, error) {
// Value 6.05 was selected to split between Windows versions.
if version < 6.05 {
return &cpuCollectorBasic{
logger: logger,
CStateSecondsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
"Time spent in low-power idle state",
@@ -73,6 +80,7 @@ func newCPUCollector() (Collector, error) {
}
return &cpuCollectorFull{
logger: logger,
CStateSecondsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
"Time spent in low-power idle state",
@@ -175,7 +183,7 @@ type perflibProcessor struct {
func (c *cpuCollectorBasic) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
data := make([]perflibProcessor, 0)
err := unmarshalObject(ctx.perfObjects["Processor"], &data)
err := unmarshalObject(ctx.perfObjects["Processor"], &data, c.logger)
if err != nil {
return err
}
@@ -284,7 +292,7 @@ type perflibProcessorInformation struct {
func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
data := make([]perflibProcessorInformation, 0)
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data)
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data, c.logger)
if err != nil {
return err
}

View File

@@ -8,7 +8,8 @@ import (
"strconv"
"strings"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
@@ -20,13 +21,18 @@ const (
// A CpuInfoCollector is a Prometheus collector for a few WMI metrics in Win32_Processor
type CpuInfoCollector struct {
logger log.Logger
CpuInfo *prometheus.Desc
}
func newCpuInfoCollector() (Collector, error) {
func newCpuInfoCollector(logger log.Logger) (Collector, error) {
const subsystem = "cpu_info"
return &CpuInfoCollector{
logger: log.With(logger, "collector", subsystem),
CpuInfo: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "", "cpu_info"),
prometheus.BuildFQName(Namespace, "", subsystem),
"Labeled CPU information as provided provided by Win32_Processor",
[]string{
"architecture",
@@ -55,7 +61,7 @@ type win32_Processor struct {
// to the provided prometheus Metric channel.
func (c *CpuInfoCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting cpu_info metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting cpu_info metrics", "desc", desc, "err", err)
return err
}
return nil

View File

@@ -4,24 +4,28 @@
package collector
import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/log"
"github.com/prometheus/client_golang/prometheus"
)
// A CSCollector is a Prometheus collector for WMI metrics
type CSCollector struct {
logger log.Logger
PhysicalMemoryBytes *prometheus.Desc
LogicalProcessors *prometheus.Desc
Hostname *prometheus.Desc
}
// newCSCollector ...
func newCSCollector() (Collector, error) {
func newCSCollector(logger log.Logger) (Collector, error) {
const subsystem = "cs"
return &CSCollector{
logger: log.With(logger, "collector", subsystem),
LogicalProcessors: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "logical_processors"),
"ComputerSystem.NumberOfLogicalProcessors",
@@ -50,7 +54,7 @@ func newCSCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting cs metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting cs metrics", "desc", desc, "err", err)
return err
}
return nil

View File

@@ -5,7 +5,8 @@ package collector
import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@@ -17,6 +18,8 @@ var dfsrEnabledCollectors *string
// DFSRCollector contains the metric and state data of the DFSR collectors.
type DFSRCollector struct {
logger log.Logger
// Connection source
ConnectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
ConnectionBytesReceivedTotal *prometheus.Desc
@@ -92,9 +95,10 @@ func newDFSRCollectorFlags(app *kingpin.Application) {
}
// newDFSRCollector is registered
func newDFSRCollector() (Collector, error) {
log.Info("dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
func newDFSRCollector(logger log.Logger) (Collector, error) {
const subsystem = "dfsr"
logger = log.With(logger, "collector", subsystem)
level.Info(logger).Log("msg", "dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
enabled := expandEnabledChildCollectors(*dfsrEnabledCollectors)
perfCounters := make([]string, 0, len(enabled))
@@ -104,6 +108,8 @@ func newDFSRCollector() (Collector, error) {
addPerfCounterDependencies(subsystem, perfCounters)
dfsrCollector := DFSRCollector{
logger: logger,
// Connection
ConnectionBandwidthSavingsUsingDFSReplicationTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
@@ -447,7 +453,7 @@ type PerflibDFSRConnection struct {
func (c *DFSRCollector) collectConnection(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRConnection
if err := unmarshalObject(ctx.perfObjects["DFS Replication Connections"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["DFS Replication Connections"], &dst, c.logger); err != nil {
return err
}
@@ -554,7 +560,7 @@ type PerflibDFSRFolder struct {
func (c *DFSRCollector) collectFolder(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRFolder
if err := unmarshalObject(ctx.perfObjects["DFS Replicated Folders"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["DFS Replicated Folders"], &dst, c.logger); err != nil {
return err
}
@@ -764,7 +770,7 @@ type PerflibDFSRVolume struct {
func (c *DFSRCollector) collectVolume(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRVolume
if err := unmarshalObject(ctx.perfObjects["DFS Replication Service Volumes"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["DFS Replication Service Volumes"], &dst, c.logger); err != nil {
return err
}

View File

@@ -4,11 +4,14 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
// A DhcpCollector is a Prometheus collector perflib DHCP metrics
type DhcpCollector struct {
logger log.Logger
PacketsReceivedTotal *prometheus.Desc
DuplicatesDroppedTotal *prometheus.Desc
PacketsExpiredTotal *prometheus.Desc
@@ -36,10 +39,12 @@ type DhcpCollector struct {
FailoverBndupdDropped *prometheus.Desc
}
func newDhcpCollector() (Collector, error) {
func newDhcpCollector(logger log.Logger) (Collector, error) {
const subsystem = "dhcp"
return &DhcpCollector{
logger: log.With(logger, "collector", subsystem),
PacketsReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_received_total"),
"Total number of packets received by the DHCP server (PacketsReceivedTotal)",
@@ -226,7 +231,7 @@ type dhcpPerf struct {
func (c *DhcpCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var perflib []dhcpPerf
if err := unmarshalObject(ctx.perfObjects["DHCP Server"], &perflib); err != nil {
if err := unmarshalObject(ctx.perfObjects["DHCP Server"], &perflib, c.logger); err != nil {
return err
}

View File

@@ -7,7 +7,8 @@ import (
"errors"
"strings"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
@@ -18,6 +19,8 @@ const (
// A DiskDriveInfoCollector is a Prometheus collector for a few WMI metrics in Win32_DiskDrive
type DiskDriveInfoCollector struct {
logger log.Logger
DiskInfo *prometheus.Desc
Status *prometheus.Desc
Size *prometheus.Desc
@@ -25,10 +28,12 @@ type DiskDriveInfoCollector struct {
Availability *prometheus.Desc
}
func newDiskDriveInfoCollector() (Collector, error) {
func newDiskDriveInfoCollector(logger log.Logger) (Collector, error) {
const subsystem = "diskdrive"
return &DiskDriveInfoCollector{
logger: log.With(logger, "collector", subsystem),
DiskInfo: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "info"),
"General drive information",
@@ -129,7 +134,7 @@ var (
// Collect sends the metric values for each metric to the provided prometheus Metric channel.
func (c *DiskDriveInfoCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting disk_drive_info metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting disk_drive_info metrics", "desc", desc, "err", err)
return err
}
return nil

View File

@@ -6,13 +6,16 @@ package collector
import (
"errors"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A DNSCollector is a Prometheus collector for WMI Win32_PerfRawData_DNS_DNS metrics
type DNSCollector struct {
logger log.Logger
ZoneTransferRequestsReceived *prometheus.Desc
ZoneTransferRequestsSent *prometheus.Desc
ZoneTransferResponsesReceived *prometheus.Desc
@@ -38,9 +41,11 @@ type DNSCollector struct {
}
// newDNSCollector ...
func newDNSCollector() (Collector, error) {
func newDNSCollector(logger log.Logger) (Collector, error) {
const subsystem = "dns"
return &DNSCollector{
logger: log.With(logger, "collector", subsystem),
ZoneTransferRequestsReceived: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "zone_transfer_requests_received_total"),
"Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
@@ -180,7 +185,7 @@ func newDNSCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting dns metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting dns metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -234,7 +239,7 @@ type Win32_PerfRawData_DNS_DNS struct {
func (c *DNSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_DNS_DNS
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -9,7 +9,8 @@ import (
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@@ -19,6 +20,8 @@ const (
)
type exchangeCollector struct {
logger log.Logger
LDAPReadTime *prometheus.Desc
LDAPSearchTime *prometheus.Desc
LDAPWriteTime *prometheus.Desc
@@ -93,7 +96,8 @@ func newExchangeCollectorFlags(app *kingpin.Application) {
}
// newExchangeCollector returns a new Collector
func newExchangeCollector() (Collector, error) {
func newExchangeCollector(logger log.Logger) (Collector, error) {
const subsystem = "exchange"
// desc creates a new prometheus description
desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
@@ -106,6 +110,8 @@ func newExchangeCollector() (Collector, error) {
}
c := exchangeCollector{
logger: log.With(logger, "collector", subsystem),
RPCAveragedLatency: desc("rpc_avg_latency_sec", "The latency (sec), averaged for the past 1024 packets"),
RPCRequests: desc("rpc_requests", "Number of client requests currently being processed by the RPC Client Access service"),
ActiveUserCount: desc("rpc_active_user_count", "Number of unique users that have shown some kind of activity in the last 2 minutes"),
@@ -201,7 +207,7 @@ func (c *exchangeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Met
for _, collectorName := range c.enabledCollectors {
if err := collectorFuncs[collectorName](ctx, ch); err != nil {
log.Errorf("Error in %s: %s", collectorName, err)
level.Error(c.logger).Log("msg", "Error in "+collectorName, "err", err)
return err
}
}
@@ -221,7 +227,7 @@ type perflibADAccessProcesses struct {
func (c *exchangeCollector) collectADAccessProcesses(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibADAccessProcesses
if err := unmarshalObject(ctx.perfObjects["MSExchange ADAccess Processes"], &data); err != nil {
if err := unmarshalObject(ctx.perfObjects["MSExchange ADAccess Processes"], &data, c.logger); err != nil {
return err
}
@@ -279,7 +285,7 @@ type perflibAvailabilityService struct {
func (c *exchangeCollector) collectAvailabilityService(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibAvailabilityService
if err := unmarshalObject(ctx.perfObjects["MSExchange Availability Service"], &data); err != nil {
if err := unmarshalObject(ctx.perfObjects["MSExchange Availability Service"], &data, c.logger); err != nil {
return err
}
@@ -307,7 +313,7 @@ type perflibHTTPProxy struct {
func (c *exchangeCollector) collectHTTPProxy(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibHTTPProxy
if err := unmarshalObject(ctx.perfObjects["MSExchange HttpProxy"], &data); err != nil {
if err := unmarshalObject(ctx.perfObjects["MSExchange HttpProxy"], &data, c.logger); err != nil {
return err
}
@@ -361,7 +367,7 @@ type perflibOWA struct {
func (c *exchangeCollector) collectOWA(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibOWA
if err := unmarshalObject(ctx.perfObjects["MSExchange OWA"], &data); err != nil {
if err := unmarshalObject(ctx.perfObjects["MSExchange OWA"], &data, c.logger); err != nil {
return err
}
@@ -389,7 +395,7 @@ type perflibActiveSync struct {
func (c *exchangeCollector) collectActiveSync(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibActiveSync
if err := unmarshalObject(ctx.perfObjects["MSExchange ActiveSync"], &data); err != nil {
if err := unmarshalObject(ctx.perfObjects["MSExchange ActiveSync"], &data, c.logger); err != nil {
return err
}
@@ -425,7 +431,7 @@ type perflibRPCClientAccess struct {
func (c *exchangeCollector) collectRPC(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibRPCClientAccess
if err := unmarshalObject(ctx.perfObjects["MSExchange RpcClientAccess"], &data); err != nil {
if err := unmarshalObject(ctx.perfObjects["MSExchange RpcClientAccess"], &data, c.logger); err != nil {
return err
}
@@ -481,7 +487,7 @@ type perflibTransportQueues struct {
func (c *exchangeCollector) collectTransportQueues(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibTransportQueues
if err := unmarshalObject(ctx.perfObjects["MSExchangeTransport Queues"], &data); err != nil {
if err := unmarshalObject(ctx.perfObjects["MSExchangeTransport Queues"], &data, c.logger); err != nil {
return err
}
@@ -555,7 +561,7 @@ type perflibWorkloadManagementWorkloads struct {
func (c *exchangeCollector) collectWorkloadManagementWorkloads(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibWorkloadManagementWorkloads
if err := unmarshalObject(ctx.perfObjects["MSExchange WorkloadManagement Workloads"], &data); err != nil {
if err := unmarshalObject(ctx.perfObjects["MSExchange WorkloadManagement Workloads"], &data, c.logger); err != nil {
return err
}
@@ -606,7 +612,7 @@ type perflibAutodiscover struct {
func (c *exchangeCollector) collectAutoDiscover(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibAutodiscover
if err := unmarshalObject(ctx.perfObjects["MSExchangeAutodiscover"], &data); err != nil {
if err := unmarshalObject(ctx.perfObjects["MSExchangeAutodiscover"], &data, c.logger); err != nil {
return err
}
for _, autodisc := range data {

View File

@@ -1,12 +1,15 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
type FSRMQuotaCollector struct {
logger log.Logger
QuotasCount *prometheus.Desc
Path *prometheus.Desc
PeakUsage *prometheus.Desc
@@ -20,9 +23,11 @@ type FSRMQuotaCollector struct {
Template *prometheus.Desc
}
func newFSRMQuotaCollector() (Collector, error) {
func newFSRMQuotaCollector(logger log.Logger) (Collector, error) {
const subsystem = "fsrmquota"
return &FSRMQuotaCollector{
logger: log.With(logger, "collector", subsystem),
QuotasCount: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "count"),
"Number of Quotas",
@@ -84,7 +89,7 @@ func newFSRMQuotaCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *FSRMQuotaCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting fsrmquota metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting fsrmquota metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -109,7 +114,7 @@ type MSFT_FSRMQuota struct {
func (c *FSRMQuotaCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []MSFT_FSRMQuota
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
var count int

View File

@@ -4,15 +4,19 @@
package collector
import (
fmt "fmt"
"strings"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// HyperVCollector is a Prometheus collector for hyper-v
type HyperVCollector struct {
logger log.Logger
// Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
HealthCritical *prometheus.Desc
HealthOk *prometheus.Desc
@@ -127,9 +131,12 @@ type HyperVCollector struct {
}
// newHyperVCollector ...
func newHyperVCollector() (Collector, error) {
func newHyperVCollector(logger log.Logger) (Collector, error) {
const subsystem = "hyperv"
buildSubsystemName := func(component string) string { return "hyperv_" + component }
return &HyperVCollector{
logger: log.With(logger, "collector", subsystem),
HealthCritical: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, buildSubsystemName("health"), "critical"),
"This counter represents the number of virtual machines with critical health",
@@ -697,62 +704,62 @@ func newHyperVCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *HyperVCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectVmHealth(ch); err != nil {
log.Error("failed collecting hyperV health status metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV health status metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmVid(ch); err != nil {
log.Error("failed collecting hyperV pages metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV pages metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmHv(ch); err != nil {
log.Error("failed collecting hyperV hv status metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV hv status metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmProcessor(ch); err != nil {
log.Error("failed collecting hyperV processor metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV processor metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectHostLPUsage(ch); err != nil {
log.Error("failed collecting hyperV host logical processors metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV host logical processors metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectHostCpuUsage(ch); err != nil {
log.Error("failed collecting hyperV host CPU metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV host CPU metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmCpuUsage(ch); err != nil {
log.Error("failed collecting hyperV VM CPU metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV VM CPU metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmSwitch(ch); err != nil {
log.Error("failed collecting hyperV switch metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV switch metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmEthernet(ch); err != nil {
log.Error("failed collecting hyperV ethernet metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV ethernet metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmStorage(ch); err != nil {
log.Error("failed collecting hyperV virtual storage metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV virtual storage metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmNetwork(ch); err != nil {
log.Error("failed collecting hyperV virtual network metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV virtual network metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmMemory(ch); err != nil {
log.Error("failed collecting hyperV virtual memory metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting hyperV virtual memory metrics", "desc", desc, "err", err)
return err
}
@@ -767,7 +774,7 @@ type Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
func (c *HyperVCollector) collectVmHealth(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -800,7 +807,7 @@ type Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition struct {
func (c *HyperVCollector) collectVmVid(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -864,7 +871,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition struct {
func (c *HyperVCollector) collectVmHv(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1002,7 +1009,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisor struct {
func (c *HyperVCollector) collectVmProcessor(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisor
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1036,7 +1043,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor struct {
func (c *HyperVCollector) collectHostLPUsage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1048,7 +1055,7 @@ func (c *HyperVCollector) collectHostLPUsage(ch chan<- prometheus.Metric) (*prom
// The name format is Hv LP <core id>
parts := strings.Split(obj.Name, " ")
if len(parts) != 3 {
log.Warnf("Unexpected format of Name in collectHostLPUsage: %q", obj.Name)
level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectHostLPUsage: %q", obj.Name))
continue
}
coreId := parts[2]
@@ -1090,7 +1097,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor struct {
func (c *HyperVCollector) collectHostCpuUsage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1102,7 +1109,7 @@ func (c *HyperVCollector) collectHostCpuUsage(ch chan<- prometheus.Metric) (*pro
// The name format is Root VP <core id>
parts := strings.Split(obj.Name, " ")
if len(parts) != 3 {
log.Warnf("Unexpected format of Name in collectHostCpuUsage: %q", obj.Name)
level.Warn(c.logger).Log("msg", "Unexpected format of Name in collectHostCpuUsage: "+obj.Name)
continue
}
coreId := parts[2]
@@ -1151,7 +1158,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor struct {
func (c *HyperVCollector) collectVmCpuUsage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1163,12 +1170,12 @@ func (c *HyperVCollector) collectVmCpuUsage(ch chan<- prometheus.Metric) (*prome
// The name format is <VM Name>:Hv VP <vcore id>
parts := strings.Split(obj.Name, ":")
if len(parts) != 2 {
log.Warnf("Unexpected format of Name in collectVmCpuUsage: %q, expected %q. Skipping.", obj.Name, "<VM Name>:Hv VP <vcore id>")
level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectVmCpuUsage: %q, expected %q. Skipping.", obj.Name, "<VM Name>:Hv VP <vcore id>"))
continue
}
coreParts := strings.Split(parts[1], " ")
if len(coreParts) != 3 {
log.Warnf("Unexpected format of core identifier in collectVmCpuUsage: %q, expected %q. Skipping.", parts[1], "Hv VP <vcore id>")
level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of core identifier in collectVmCpuUsage: %q, expected %q. Skipping.", parts[1], "Hv VP <vcore id>"))
continue
}
vmName := parts[0]
@@ -1238,7 +1245,7 @@ type Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch struct {
func (c *HyperVCollector) collectVmSwitch(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1403,7 +1410,7 @@ type Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter struct {
func (c *HyperVCollector) collectVmEthernet(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1473,7 +1480,7 @@ type Win32_PerfRawData_Counters_HyperVVirtualStorageDevice struct {
func (c *HyperVCollector) collectVmStorage(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_Counters_HyperVVirtualStorageDevice
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1542,7 +1549,7 @@ type Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter struct {
func (c *HyperVCollector) collectVmNetwork(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1615,7 +1622,7 @@ type Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM struct {
func (c *HyperVCollector) collectVmMemory(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -11,7 +11,8 @@ import (
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows/registry"
)
@@ -50,31 +51,31 @@ type simple_version struct {
minor uint64
}
func getIISVersion() simple_version {
func getIISVersion(logger log.Logger) simple_version {
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\InetStp\`, registry.QUERY_VALUE)
if err != nil {
log.Warn("Couldn't open registry to determine IIS version:", err)
level.Warn(logger).Log("msg", "Couldn't open registry to determine IIS version", "err", err)
return simple_version{}
}
defer func() {
err = k.Close()
if err != nil {
log.Warnf("Failed to close registry key: %v", err)
level.Warn(logger).Log("msg", "Failed to close registry key", "err", err)
}
}()
major, _, err := k.GetIntegerValue("MajorVersion")
if err != nil {
log.Warn("Couldn't open registry to determine IIS version:", err)
level.Warn(logger).Log("msg", "Couldn't open registry to determine IIS version", "err", err)
return simple_version{}
}
minor, _, err := k.GetIntegerValue("MinorVersion")
if err != nil {
log.Warn("Couldn't open registry to determine IIS version:", err)
level.Warn(logger).Log("msg", "Couldn't open registry to determine IIS version", "err", err)
return simple_version{}
}
log.Debugf("Detected IIS %d.%d\n", major, minor)
level.Debug(logger).Log("msg", fmt.Sprintf("Detected IIS %d.%d\n", major, minor))
return simple_version{
major: major,
@@ -83,6 +84,8 @@ func getIISVersion() simple_version {
}
type IISCollector struct {
logger log.Logger
// Web Service
CurrentAnonymousUsers *prometheus.Desc
CurrentBlockedAsyncIORequests *prometheus.Desc
@@ -255,10 +258,13 @@ func newIISCollectorFlags(app *kingpin.Application) {
}).String()
}
func newIISCollector() (Collector, error) {
func newIISCollector(logger log.Logger) (Collector, error) {
const subsystem = "iis"
logger = log.With(logger, "collector", subsystem)
if *oldSiteExclude != "" {
if !siteExcludeSet {
log.Warnln("msg", "--collector.iis.site-blacklist is DEPRECATED and will be removed in a future release, use --collector.iis.site-exclude")
level.Warn(logger).Log("msg", "--collector.iis.site-blacklist is DEPRECATED and will be removed in a future release, use --collector.iis.site-exclude")
*siteExclude = *oldSiteExclude
} else {
return nil, errors.New("--collector.iis.site-blacklist and --collector.iis.site-exclude are mutually exclusive")
@@ -266,7 +272,7 @@ func newIISCollector() (Collector, error) {
}
if *oldSiteInclude != "" {
if !siteIncludeSet {
log.Warnln("msg", "--collector.iis.site-whitelist is DEPRECATED and will be removed in a future release, use --collector.iis.site-include")
level.Warn(logger).Log("msg", "--collector.iis.site-whitelist is DEPRECATED and will be removed in a future release, use --collector.iis.site-include")
*siteInclude = *oldSiteInclude
} else {
return nil, errors.New("--collector.iis.site-whitelist and --collector.iis.site-include are mutually exclusive")
@@ -275,7 +281,7 @@ func newIISCollector() (Collector, error) {
if *oldAppExclude != "" {
if !appExcludeSet {
log.Warnln("msg", "--collector.iis.app-blacklist is DEPRECATED and will be removed in a future release, use --collector.iis.app-exclude")
level.Warn(logger).Log("msg", "--collector.iis.app-blacklist is DEPRECATED and will be removed in a future release, use --collector.iis.app-exclude")
*appExclude = *oldAppExclude
} else {
return nil, errors.New("--collector.iis.app-blacklist and --collector.iis.app-exclude are mutually exclusive")
@@ -283,16 +289,16 @@ func newIISCollector() (Collector, error) {
}
if *oldAppInclude != "" {
if !appIncludeSet {
log.Warnln("msg", "--collector.iis.app-whitelist is DEPRECATED and will be removed in a future release, use --collector.iis.app-include")
level.Warn(logger).Log("msg", "--collector.iis.app-whitelist is DEPRECATED and will be removed in a future release, use --collector.iis.app-include")
*appInclude = *oldAppInclude
} else {
return nil, errors.New("--collector.iis.app-whitelist and --collector.iis.app-include are mutually exclusive")
}
}
const subsystem = "iis"
return &IISCollector{
iis_version: getIISVersion(),
logger: logger,
iis_version: getIISVersion(logger),
siteIncludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *siteInclude)),
siteExcludePattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *siteExclude)),
@@ -914,22 +920,22 @@ func newIISCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *IISCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectWebService(ctx, ch); err != nil {
log.Error("failed collecting iis metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectAPP_POOL_WAS(ctx, ch); err != nil {
log.Error("failed collecting iis metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectW3SVC_W3WP(ctx, ch); err != nil {
log.Error("failed collecting iis metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectWebServiceCache(ctx, ch); err != nil {
log.Error("failed collecting iis metrics:", desc, err)
level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
return err
}
@@ -1028,7 +1034,7 @@ func dedupIISNames[V hasGetIISName](services []V) map[string]V {
func (c *IISCollector) collectWebService(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var webService []perflibWebService
if err := unmarshalObject(ctx.perfObjects["Web Service"], &webService); err != nil {
if err := unmarshalObject(ctx.perfObjects["Web Service"], &webService, c.logger); err != nil {
return nil, err
}
@@ -1314,7 +1320,7 @@ var applicationStates = map[uint32]string{
func (c *IISCollector) collectAPP_POOL_WAS(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var APP_POOL_WAS []perflibAPP_POOL_WAS
if err := unmarshalObject(ctx.perfObjects["APP_POOL_WAS"], &APP_POOL_WAS); err != nil {
if err := unmarshalObject(ctx.perfObjects["APP_POOL_WAS"], &APP_POOL_WAS, c.logger); err != nil {
return nil, err
}
@@ -1491,7 +1497,7 @@ type perflibW3SVC_W3WP_IIS8 struct {
func (c *IISCollector) collectW3SVC_W3WP(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var W3SVC_W3WP []perflibW3SVC_W3WP
if err := unmarshalObject(ctx.perfObjects["W3SVC_W3WP"], &W3SVC_W3WP); err != nil {
if err := unmarshalObject(ctx.perfObjects["W3SVC_W3WP"], &W3SVC_W3WP, c.logger); err != nil {
return nil, err
}
@@ -1750,7 +1756,7 @@ func (c *IISCollector) collectW3SVC_W3WP(ctx *ScrapeContext, ch chan<- prometheu
if c.iis_version.major >= 8 {
var W3SVC_W3WP_IIS8 []perflibW3SVC_W3WP_IIS8
if err := unmarshalObject(ctx.perfObjects["W3SVC_W3WP"], &W3SVC_W3WP_IIS8); err != nil {
if err := unmarshalObject(ctx.perfObjects["W3SVC_W3WP"], &W3SVC_W3WP_IIS8, c.logger); err != nil {
return nil, err
}
@@ -1889,7 +1895,7 @@ type perflibWebServiceCache struct {
func (c *IISCollector) collectWebServiceCache(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var WebServiceCache []perflibWebServiceCache
if err := unmarshalObject(ctx.perfObjects["Web Service Cache"], &WebServiceCache); err != nil {
if err := unmarshalObject(ctx.perfObjects["Web Service Cache"], &WebServiceCache, c.logger); err != nil {
return nil, err
}

View File

@@ -1,6 +1,9 @@
package collector
import "github.com/alecthomas/kingpin/v2"
import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
)
// collectorInit represents the required initialisation config for a collector.
type collectorInit struct {
@@ -15,7 +18,7 @@ type collectorInit struct {
perfCounterFunc perfCounterNamesBuilder
}
func getDFSRCollectorDeps() []string {
func getDFSRCollectorDeps(_ log.Logger) []string {
// Perflib sources are dynamic, depending on the enabled child collectors
var perflibDependencies []string
for _, source := range expandEnabledChildCollectors(*dfsrEnabledCollectors) {
@@ -36,7 +39,7 @@ var collectors = []collectorInit{
name: "adcs",
flags: nil,
builder: adcsCollectorMethod,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Certification Authority"}
},
},
@@ -44,7 +47,7 @@ var collectors = []collectorInit{
name: "adfs",
flags: nil,
builder: newADFSCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"AD FS"}
},
},
@@ -52,7 +55,7 @@ var collectors = []collectorInit{
name: "cache",
flags: nil,
builder: newCacheCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Cache"}
},
},
@@ -66,8 +69,8 @@ var collectors = []collectorInit{
name: "cpu",
flags: nil,
builder: newCPUCollector,
perfCounterFunc: func() []string {
if getWindowsVersion() > 6.05 {
perfCounterFunc: func(logger log.Logger) []string {
if getWindowsVersion(logger) > 6.05 {
return []string{"Processor Information"}
}
return []string{"Processor"}
@@ -113,7 +116,7 @@ var collectors = []collectorInit{
name: "exchange",
flags: newExchangeCollectorFlags,
builder: newExchangeCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{
"MSExchange ADAccess Processes",
"MSExchangeTransport Queues",
@@ -143,7 +146,7 @@ var collectors = []collectorInit{
name: "iis",
flags: newIISCollectorFlags,
builder: newIISCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{
"Web Service",
"APP_POOL_WAS",
@@ -156,7 +159,7 @@ var collectors = []collectorInit{
name: "logical_disk",
flags: newLogicalDiskCollectorFlags,
builder: newLogicalDiskCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"LogicalDisk"}
},
},
@@ -170,7 +173,7 @@ var collectors = []collectorInit{
name: "memory",
flags: nil,
builder: newMemoryCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Memory"}
},
},
@@ -220,7 +223,7 @@ var collectors = []collectorInit{
name: "net",
flags: newNetworkCollectorFlags,
builder: newNetworkCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Network Interface"}
},
},
@@ -276,7 +279,7 @@ var collectors = []collectorInit{
name: "os",
flags: nil,
builder: newOSCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Paging File"}
},
},
@@ -284,7 +287,7 @@ var collectors = []collectorInit{
name: "process",
flags: newProcessCollectorFlags,
builder: newProcessCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Process"}
},
},
@@ -292,7 +295,7 @@ var collectors = []collectorInit{
name: "remote_fx",
flags: nil,
builder: newRemoteFx,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"RemoteFX Network"}
},
},
@@ -312,7 +315,7 @@ var collectors = []collectorInit{
name: "smtp",
flags: newSMTPCollectorFlags,
builder: newSMTPCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"SMTP Server"}
},
},
@@ -320,7 +323,7 @@ var collectors = []collectorInit{
name: "system",
flags: nil,
builder: newSystemCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"System"}
},
},
@@ -334,7 +337,7 @@ var collectors = []collectorInit{
name: "tcp",
flags: nil,
builder: newTCPCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"TCPv4"}
},
},
@@ -342,7 +345,7 @@ var collectors = []collectorInit{
name: "terminal_services",
flags: nil,
builder: newTerminalServicesCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{
"Terminal Services",
"Terminal Services Session",
@@ -366,7 +369,7 @@ var collectors = []collectorInit{
name: "time",
flags: nil,
builder: newTimeCollector,
perfCounterFunc: func() []string {
perfCounterFunc: func(_ log.Logger) []string {
return []string{"Windows Time Service"}
},
},
@@ -394,12 +397,12 @@ func RegisterCollectorsFlags(app *kingpin.Application) {
}
// RegisterCollectors To be called by the exporter for collector initialisation
func RegisterCollectors() {
func RegisterCollectors(logger log.Logger) {
for _, v := range collectors {
var perfCounterNames []string
if v.perfCounterFunc != nil {
perfCounterNames = v.perfCounterFunc()
perfCounterNames = v.perfCounterFunc(logger)
}
registerCollector(v.name, v.builder, perfCounterNames...)

View File

@@ -9,7 +9,8 @@ import (
"regexp"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@@ -34,6 +35,8 @@ var (
// A LogicalDiskCollector is a Prometheus collector for perflib logicalDisk metrics
type LogicalDiskCollector struct {
logger log.Logger
RequestsQueued *prometheus.Desc
AvgReadQueue *prometheus.Desc
AvgWriteQueue *prometheus.Desc
@@ -84,10 +87,13 @@ func newLogicalDiskCollectorFlags(app *kingpin.Application) {
}
// newLogicalDiskCollector ...
func newLogicalDiskCollector() (Collector, error) {
func newLogicalDiskCollector(logger log.Logger) (Collector, error) {
const subsystem = "logical_disk"
logger = log.With(logger, "collector", subsystem)
if *volumeOldExclude != "" {
if !volumeExcludeSet {
log.Warnln("msg", "--collector.logical_disk.volume-blacklist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-exclude")
level.Warn(logger).Log("msg", "--collector.logical_disk.volume-blacklist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-exclude")
*volumeExclude = *volumeOldExclude
} else {
return nil, errors.New("--collector.logical_disk.volume-blacklist and --collector.logical_disk.volume-exclude are mutually exclusive")
@@ -95,16 +101,16 @@ func newLogicalDiskCollector() (Collector, error) {
}
if *volumeOldInclude != "" {
if !volumeIncludeSet {
log.Warnln("msg", "--collector.logical_disk.volume-whitelist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-include")
level.Warn(logger).Log("msg", "--collector.logical_disk.volume-whitelist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-include")
*volumeInclude = *volumeOldInclude
} else {
return nil, errors.New("--collector.logical_disk.volume-whitelist and --collector.logical_disk.volume-include are mutually exclusive")
}
}
const subsystem = "logical_disk"
return &LogicalDiskCollector{
logger: logger,
RequestsQueued: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "requests_queued"),
"The number of requests queued to the disk (LogicalDisk.CurrentDiskQueueLength)",
@@ -226,7 +232,7 @@ func newLogicalDiskCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
log.Error("failed collecting logical_disk metrics:", desc, err)
level.Error(c.logger).Log("failed collecting logical_disk metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -257,7 +263,7 @@ type logicalDisk struct {
func (c *LogicalDiskCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []logicalDisk
if err := unmarshalObject(ctx.perfObjects["LogicalDisk"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["LogicalDisk"], &dst, c.logger); err != nil {
return nil, err
}

View File

@@ -6,21 +6,25 @@ package collector
import (
"errors"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A LogonCollector is a Prometheus collector for WMI metrics
type LogonCollector struct {
logger log.Logger
LogonType *prometheus.Desc
}
// newLogonCollector ...
func newLogonCollector() (Collector, error) {
func newLogonCollector(logger log.Logger) (Collector, error) {
const subsystem = "logon"
return &LogonCollector{
logger: log.With(logger, "collector", subsystem),
LogonType: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "logon_type"),
"Number of active logon sessions (LogonSession.LogonType)",
@@ -34,7 +38,7 @@ func newLogonCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *LogonCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting user metrics:", desc, err)
level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -48,7 +52,7 @@ type Win32_LogonSession struct {
func (c *LogonCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_LogonSession
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
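The same migration is applied file by file below; as a compact reference, here is an illustrative, self-contained sketch of the shape each collector converges on. The package name, exampleCollector, the subsystem name and collectSomething are hypothetical stand-ins, not code from this commit, and the real Collect also receives a *ScrapeContext, omitted here to keep the sketch standalone.

package example

import (
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
)

// exampleCollector mirrors the pattern used throughout this change:
// the struct carries a go-kit logger instead of relying on a package-global one.
type exampleCollector struct {
	logger log.Logger

	SomethingTotal *prometheus.Desc
}

func newExampleCollector(logger log.Logger) (*exampleCollector, error) {
	const subsystem = "example"

	return &exampleCollector{
		// Scope every log line with the collector name once, at construction time.
		logger: log.With(logger, "collector", subsystem),
		SomethingTotal: prometheus.NewDesc(
			prometheus.BuildFQName("windows", subsystem, "something_total"),
			"Hypothetical counter used only to illustrate the pattern.",
			nil,
			nil,
		),
	}, nil
}

func (c *exampleCollector) Collect(ch chan<- prometheus.Metric) error {
	if err := c.collectSomething(ch); err != nil {
		// Leveled, structured logging replaces the old package-level log.Error calls.
		level.Error(c.logger).Log("msg", "failed collecting example metrics", "err", err)
		return err
	}
	return nil
}

func (c *exampleCollector) collectSomething(ch chan<- prometheus.Metric) error {
	ch <- prometheus.MustNewConstMetric(c.SomethingTotal, prometheus.CounterValue, 1)
	return nil
}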

View File

@@ -7,12 +7,15 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
// A MemoryCollector is a Prometheus collector for perflib Memory metrics
type MemoryCollector struct {
logger log.Logger
AvailableBytes *prometheus.Desc
CacheBytes *prometheus.Desc
CacheBytesPeak *prometheus.Desc
@@ -48,10 +51,12 @@ type MemoryCollector struct {
}
// newMemoryCollector ...
func newMemoryCollector() (Collector, error) {
func newMemoryCollector(logger log.Logger) (Collector, error) {
const subsystem = "memory"
return &MemoryCollector{
logger: log.With(logger, "collector", subsystem),
AvailableBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "available_bytes"),
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
@@ -263,7 +268,7 @@ func newMemoryCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
log.Error("failed collecting memory metrics:", desc, err)
level.Error(c.logger).Log("failed collecting memory metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -308,7 +313,7 @@ type memory struct {
func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []memory
if err := unmarshalObject(ctx.perfObjects["Memory"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["Memory"], &dst, c.logger); err != nil {
return nil, err
}

View File

@@ -1,12 +1,15 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A MSCluster_ClusterCollector is a Prometheus collector for WMI MSCluster_Cluster metrics
type MSCluster_ClusterCollector struct {
logger log.Logger
AddEvictDelay *prometheus.Desc
AdminAccessPoint *prometheus.Desc
AutoAssignNodeSite *prometheus.Desc
@@ -86,9 +89,11 @@ type MSCluster_ClusterCollector struct {
WitnessRestartInterval *prometheus.Desc
}
func newMSCluster_ClusterCollector() (Collector, error) {
func newMSCluster_ClusterCollector(logger log.Logger) (Collector, error) {
const subsystem = "mscluster_cluster"
return &MSCluster_ClusterCollector{
logger: log.With(logger, "collector", subsystem),
AddEvictDelay: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "add_evict_delay"),
"Provides access to the cluster's AddEvictDelay property, which is the number a seconds that a new node is delayed after an eviction of another node.",
@@ -642,7 +647,7 @@ type MSCluster_Cluster struct {
// to the provided prometheus Metric channel.
func (c *MSCluster_ClusterCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Cluster
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}

View File

@@ -1,12 +1,15 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A MSCluster_NetworkCollector is a Prometheus collector for WMI MSCluster_Network metrics
type MSCluster_NetworkCollector struct {
logger log.Logger
Characteristics *prometheus.Desc
Flags *prometheus.Desc
Metric *prometheus.Desc
@@ -14,9 +17,11 @@ type MSCluster_NetworkCollector struct {
State *prometheus.Desc
}
func newMSCluster_NetworkCollector() (Collector, error) {
func newMSCluster_NetworkCollector(logger log.Logger) (Collector, error) {
const subsystem = "mscluster_network"
return &MSCluster_NetworkCollector{
logger: log.With(logger, "collector", subsystem),
Characteristics: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
"Provides the characteristics of the network.",
@@ -66,7 +71,7 @@ type MSCluster_Network struct {
// to the provided prometheus Metric channel.
func (c *MSCluster_NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Network
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}

View File

@@ -1,12 +1,15 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A MSCluster_NodeCollector is a Prometheus collector for WMI MSCluster_Node metrics
type MSCluster_NodeCollector struct {
logger log.Logger
BuildNumber *prometheus.Desc
Characteristics *prometheus.Desc
DetectedCloudPlatform *prometheus.Desc
@@ -23,9 +26,10 @@ type MSCluster_NodeCollector struct {
StatusInformation *prometheus.Desc
}
func newMSCluster_NodeCollector() (Collector, error) {
func newMSCluster_NodeCollector(logger log.Logger) (Collector, error) {
const subsystem = "mscluster_node"
return &MSCluster_NodeCollector{
logger: log.With(logger, "collector", subsystem),
BuildNumber: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "build_number"),
"Provides access to the node's BuildNumber property.",
@@ -138,7 +142,7 @@ type MSCluster_Node struct {
// to the provided prometheus Metric channel.
func (c *MSCluster_NodeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Node
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}

View File

@@ -1,12 +1,15 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A MSCluster_ResourceCollector is a Prometheus collector for WMI MSCluster_Resource metrics
type MSCluster_ResourceCollector struct {
logger log.Logger
Characteristics *prometheus.Desc
DeadlockTimeout *prometheus.Desc
EmbeddedFailureAction *prometheus.Desc
@@ -25,9 +28,10 @@ type MSCluster_ResourceCollector struct {
Subclass *prometheus.Desc
}
func newMSCluster_ResourceCollector() (Collector, error) {
func newMSCluster_ResourceCollector(logger log.Logger) (Collector, error) {
const subsystem = "mscluster_resource"
return &MSCluster_ResourceCollector{
logger: log.With(logger, "collector", subsystem),
Characteristics: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
"Provides the characteristics of the object.",
@@ -156,7 +160,7 @@ type MSCluster_Resource struct {
// to the provided prometheus Metric channel.
func (c *MSCluster_ResourceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Resource
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}

View File

@@ -1,12 +1,15 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A MSCluster_ResourceGroupCollector is a Prometheus collector for WMI MSCluster_ResourceGroup metrics
type MSCluster_ResourceGroupCollector struct {
logger log.Logger
AutoFailbackType *prometheus.Desc
Characteristics *prometheus.Desc
ColdStartSetting *prometheus.Desc
@@ -24,9 +27,10 @@ type MSCluster_ResourceGroupCollector struct {
State *prometheus.Desc
}
func newMSCluster_ResourceGroupCollector() (Collector, error) {
func newMSCluster_ResourceGroupCollector(logger log.Logger) (Collector, error) {
const subsystem = "mscluster_resourcegroup"
return &MSCluster_ResourceGroupCollector{
logger: log.With(logger, "collector", subsystem),
AutoFailbackType: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "auto_failback_type"),
"Provides access to the group's AutoFailbackType property.",
@@ -132,7 +136,7 @@ type MSCluster_ResourceGroup struct {
// to the provided prometheus Metric channel.
func (c *MSCluster_ResourceGroupCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_ResourceGroup
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}

View File

@@ -7,7 +7,8 @@ import (
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
@@ -22,6 +23,8 @@ var (
// A Win32_PerfRawData_MSMQ_MSMQQueueCollector is a Prometheus collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics
type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
logger log.Logger
BytesinJournalQueue *prometheus.Desc
BytesinQueue *prometheus.Desc
MessagesinJournalQueue *prometheus.Desc
@@ -36,14 +39,17 @@ func newMSMQCollectorFlags(app *kingpin.Application) {
}
// NewWin32_PerfRawData_MSMQ_MSMQQueueCollector ...
func newMSMQCollector() (Collector, error) {
func newMSMQCollector(logger log.Logger) (Collector, error) {
const subsystem = "msmq"
logger = log.With(logger, "collector", subsystem)
if *msmqWhereClause == "" {
log.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
level.Warn(logger).Log("msg", "No where-clause specified for msmq collector. This will generate a very large number of metrics!")
}
return &Win32_PerfRawData_MSMQ_MSMQQueueCollector{
logger: logger,
BytesinJournalQueue: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "bytes_in_journal_queue"),
"Size of queue journal in bytes",
@@ -76,7 +82,7 @@ func newMSMQCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting msmq metrics:", desc, err)
level.Error(c.logger).Log("failed collecting msmq metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -93,7 +99,7 @@ type Win32_PerfRawData_MSMQ_MSMQQueue struct {
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_MSMQ_MSMQQueue
q := queryAllWhere(&dst, c.queryWhereClause)
q := queryAllWhere(&dst, c.queryWhereClause, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -12,7 +12,8 @@ import (
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows/registry"
)
@@ -30,7 +31,7 @@ var (
type mssqlInstancesType map[string]string
func getMSSQLInstances() mssqlInstancesType {
func getMSSQLInstances(logger log.Logger) mssqlInstancesType {
sqlInstances := make(mssqlInstancesType)
// in case querying the registry fails, return the default instance
@@ -40,19 +41,19 @@ func getMSSQLInstances() mssqlInstancesType {
regkey := `Software\Microsoft\Microsoft SQL Server\Instance Names\SQL`
k, err := registry.OpenKey(registry.LOCAL_MACHINE, regkey, registry.QUERY_VALUE)
if err != nil {
log.Warn("Couldn't open registry to determine SQL instances:", err)
level.Warn(logger).Log("msg", "Couldn't open registry to determine SQL instances", "err", err)
return sqlDefaultInstance
}
defer func() {
err = k.Close()
if err != nil {
log.Warnf("Failed to close registry key: %v", err)
level.Warn(logger).Log("msg", "Failed to close registry key", "err", err)
}
}()
instanceNames, err := k.ReadValueNames(0)
if err != nil {
log.Warnf("Can't ReadSubKeyNames %#v", err)
level.Warn(logger).Log("msg", "Can't ReadSubKeyNames", "err", err)
return sqlDefaultInstance
}
@@ -62,7 +63,7 @@ func getMSSQLInstances() mssqlInstancesType {
}
}
log.Debugf("Detected MSSQL Instances: %#v\n", sqlInstances)
level.Debug(logger).Log("msg", fmt.Sprintf("Detected MSSQL Instances: %#v\n", sqlInstances))
return sqlInstances
}
@@ -130,6 +131,8 @@ func mssqlGetPerfObjectName(sqlInstance string, collector string) string {
// A MSSQLCollector is a Prometheus collector for various WMI Win32_PerfRawData_MSSQLSERVER_* metrics
type MSSQLCollector struct {
logger log.Logger
// meta
mssqlScrapeDurationDesc *prometheus.Desc
mssqlScrapeSuccessDesc *prometheus.Desc
@@ -414,12 +417,12 @@ func newMSSQLCollectorFlags(app *kingpin.Application) {
}
// newMSSQLCollector ...
func newMSSQLCollector() (Collector, error) {
func newMSSQLCollector(logger log.Logger) (Collector, error) {
const subsystem = "mssql"
logger = log.With(logger, "collector", subsystem)
enabled := expandEnabledChildCollectors(*mssqlEnabledCollectors)
mssqlInstances := getMSSQLInstances()
mssqlInstances := getMSSQLInstances(logger)
perfCounters := make([]string, 0, len(mssqlInstances)*len(enabled))
for instance := range mssqlInstances {
for _, c := range enabled {
@@ -429,6 +432,7 @@ func newMSSQLCollector() (Collector, error) {
addPerfCounterDependencies(subsystem, perfCounters)
mssqlCollector := MSSQLCollector{
logger: logger,
// meta
mssqlScrapeDurationDesc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "collector_duration_seconds"),
@@ -1929,11 +1933,11 @@ func (c *MSSQLCollector) execute(ctx *ScrapeContext, name string, fn mssqlCollec
var success float64
if err != nil {
log.Errorf("mssql class collector %s failed after %fs: %s", name, duration.Seconds(), err)
level.Error(c.logger).Log("msg", fmt.Sprintf("mssql class collector %s failed after %fs", name, duration.Seconds()), "err", err)
success = 0
c.mssqlChildCollectorFailure++
} else {
log.Debugf("mssql class collector %s succeeded after %fs.", name, duration.Seconds())
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql class collector %s succeeded after %fs.", name, duration.Seconds()))
success = 1
}
ch <- prometheus.MustNewConstMetric(
@@ -2024,9 +2028,9 @@ type mssqlAccessMethods struct {
func (c *MSSQLCollector) collectAccessMethods(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlAccessMethods
log.Debugf("mssql_accessmethods collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_accessmethods collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "accessmethods")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "accessmethods")], &dst, c.logger); err != nil {
return nil, err
}
@@ -2359,9 +2363,9 @@ type mssqlAvailabilityReplica struct {
func (c *MSSQLCollector) collectAvailabilityReplica(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlAvailabilityReplica
log.Debugf("mssql_availreplica collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_availreplica collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "availreplica")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "availreplica")], &dst, c.logger); err != nil {
return nil, err
}
@@ -2467,9 +2471,9 @@ type mssqlBufferManager struct {
func (c *MSSQLCollector) collectBufferManager(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlBufferManager
log.Debugf("mssql_bufman collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_bufman collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "bufman")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "bufman")], &dst, c.logger); err != nil {
return nil, err
}
@@ -2671,9 +2675,9 @@ type mssqlDatabaseReplica struct {
func (c *MSSQLCollector) collectDatabaseReplica(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlDatabaseReplica
log.Debugf("mssql_dbreplica collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_dbreplica collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "dbreplica")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "dbreplica")], &dst, c.logger); err != nil {
return nil, err
}
@@ -2910,9 +2914,9 @@ type mssqlDatabases struct {
func (c *MSSQLCollector) collectDatabases(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlDatabases
log.Debugf("mssql_databases collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_databases collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "databases")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "databases")], &dst, c.logger); err != nil {
return nil, err
}
@@ -3292,9 +3296,9 @@ type mssqlGeneralStatistics struct {
func (c *MSSQLCollector) collectGeneralStatistics(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlGeneralStatistics
log.Debugf("mssql_genstats collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_genstats collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "genstats")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "genstats")], &dst, c.logger); err != nil {
return nil, err
}
@@ -3487,9 +3491,9 @@ type mssqlLocks struct {
func (c *MSSQLCollector) collectLocks(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlLocks
log.Debugf("mssql_locks collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_locks collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "locks")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "locks")], &dst, c.logger); err != nil {
return nil, err
}
@@ -3585,9 +3589,9 @@ type mssqlMemoryManager struct {
func (c *MSSQLCollector) collectMemoryManager(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlMemoryManager
log.Debugf("mssql_memmgr collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_memmgr collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "memmgr")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "memmgr")], &dst, c.logger); err != nil {
return nil, err
}
@@ -3754,9 +3758,9 @@ type mssqlSQLStatistics struct {
func (c *MSSQLCollector) collectSQLStats(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlSQLStatistics
log.Debugf("mssql_sqlstats collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlstats collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlstats")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlstats")], &dst, c.logger); err != nil {
return nil, err
}
@@ -3862,9 +3866,9 @@ type mssqlWaitStatistics struct {
func (c *MSSQLCollector) collectWaitStats(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlWaitStatistics
log.Debugf("mssql_waitstats collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_waitstats collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "waitstats")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "waitstats")], &dst, c.logger); err != nil {
return nil, err
}
@@ -3968,9 +3972,9 @@ type mssqlSQLErrors struct {
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object
func (c *MSSQLCollector) collectSQLErrors(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlSQLErrors
log.Debugf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlerrors")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlerrors")], &dst, c.logger); err != nil {
return nil, err
}
@@ -4011,9 +4015,9 @@ type mssqlTransactions struct {
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
func (c *MSSQLCollector) collectTransactions(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlTransactions
log.Debugf("mssql_transactions collector iterating sql instance %s.", sqlInstance)
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_transactions collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "transactions")], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "transactions")], &dst, c.logger); err != nil {
return nil, err
}

View File

@@ -9,7 +9,8 @@ import (
"regexp"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@@ -36,6 +37,8 @@ var (
// A NetworkCollector is a Prometheus collector for Perflib Network Interface metrics
type NetworkCollector struct {
logger log.Logger
BytesReceivedTotal *prometheus.Desc
BytesSentTotal *prometheus.Desc
BytesTotal *prometheus.Desc
@@ -84,10 +87,13 @@ func newNetworkCollectorFlags(app *kingpin.Application) {
}
// newNetworkCollector ...
func newNetworkCollector() (Collector, error) {
func newNetworkCollector(logger log.Logger) (Collector, error) {
const subsystem = "net"
logger = log.With(logger, "collector", subsystem)
if *nicOldExclude != "" {
if !nicExcludeSet {
log.Warnln("msg", "--collector.net.nic-blacklist is DEPRECATED and will be removed in a future release, use --collector.net.nic-exclude")
level.Warn(logger).Log("msg", "--collector.net.nic-blacklist is DEPRECATED and will be removed in a future release, use --collector.net.nic-exclude")
*nicExclude = *nicOldExclude
} else {
return nil, errors.New("--collector.net.nic-blacklist and --collector.net.nic-exclude are mutually exclusive")
@@ -95,16 +101,15 @@ func newNetworkCollector() (Collector, error) {
}
if *nicOldInclude != "" {
if !nicIncludeSet {
log.Warnln("msg", "--collector.net.nic-whitelist is DEPRECATED and will be removed in a future release, use --collector.net.nic-include")
level.Warn(logger).Log("msg", "--collector.net.nic-whitelist is DEPRECATED and will be removed in a future release, use --collector.net.nic-include")
*nicInclude = *nicOldInclude
} else {
return nil, errors.New("--collector.net.nic-whitelist and --collector.net.nic-include are mutually exclusive")
}
}
const subsystem = "net"
return &NetworkCollector{
logger: logger,
BytesReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "bytes_received_total"),
"(Network.BytesReceivedPerSec)",
@@ -193,7 +198,7 @@ func newNetworkCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
log.Error("failed collecting net metrics:", desc, err)
level.Error(c.logger).Log("failed collecting net metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -227,7 +232,7 @@ type networkInterface struct {
func (c *NetworkCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []networkInterface
if err := unmarshalObject(ctx.perfObjects["Network Interface"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["Network Interface"], &dst, c.logger); err != nil {
return nil, err
}

View File

@@ -4,13 +4,16 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A NETFramework_NETCLRExceptionsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics
type NETFramework_NETCLRExceptionsCollector struct {
logger log.Logger
NumberofExcepsThrown *prometheus.Desc
NumberofFilters *prometheus.Desc
NumberofFinallys *prometheus.Desc
@@ -18,9 +21,11 @@ type NETFramework_NETCLRExceptionsCollector struct {
}
// newNETFramework_NETCLRExceptionsCollector ...
func newNETFramework_NETCLRExceptionsCollector() (Collector, error) {
func newNETFramework_NETCLRExceptionsCollector(logger log.Logger) (Collector, error) {
const subsystem = "netframework_clrexceptions"
return &NETFramework_NETCLRExceptionsCollector{
logger: log.With(logger, "collector", subsystem),
NumberofExcepsThrown: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "exceptions_thrown_total"),
"Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.",
@@ -52,7 +57,7 @@ func newNETFramework_NETCLRExceptionsCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics:", desc, err)
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -70,7 +75,7 @@ type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {
func (c *NETFramework_NETCLRExceptionsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -4,22 +4,26 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A NETFramework_NETCLRInteropCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics
type NETFramework_NETCLRInteropCollector struct {
logger log.Logger
NumberofCCWs *prometheus.Desc
Numberofmarshalling *prometheus.Desc
NumberofStubs *prometheus.Desc
}
// newNETFramework_NETCLRInteropCollector ...
func newNETFramework_NETCLRInteropCollector() (Collector, error) {
func newNETFramework_NETCLRInteropCollector(logger log.Logger) (Collector, error) {
const subsystem = "netframework_clrinterop"
return &NETFramework_NETCLRInteropCollector{
logger: log.With(logger, "collector", subsystem),
NumberofCCWs: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "com_callable_wrappers_total"),
"Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.",
@@ -45,7 +49,7 @@ func newNETFramework_NETCLRInteropCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting win32_perfrawdata_netframework_netclrinterop metrics:", desc, err)
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrinterop metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -63,7 +67,7 @@ type Win32_PerfRawData_NETFramework_NETCLRInterop struct {
func (c *NETFramework_NETCLRInteropCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -4,13 +4,16 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A NETFramework_NETCLRJitCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics
type NETFramework_NETCLRJitCollector struct {
logger log.Logger
NumberofMethodsJitted *prometheus.Desc
TimeinJit *prometheus.Desc
StandardJitFailures *prometheus.Desc
@@ -18,9 +21,10 @@ type NETFramework_NETCLRJitCollector struct {
}
// newNETFramework_NETCLRJitCollector ...
func newNETFramework_NETCLRJitCollector() (Collector, error) {
func newNETFramework_NETCLRJitCollector(logger log.Logger) (Collector, error) {
const subsystem = "netframework_clrjit"
return &NETFramework_NETCLRJitCollector{
logger: log.With(logger, "collector", subsystem),
NumberofMethodsJitted: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "jit_methods_total"),
"Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.",
@@ -52,7 +56,7 @@ func newNETFramework_NETCLRJitCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting win32_perfrawdata_netframework_netclrjit metrics:", desc, err)
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrjit metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -72,7 +76,7 @@ type Win32_PerfRawData_NETFramework_NETCLRJit struct {
func (c *NETFramework_NETCLRJitCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRJit
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -4,13 +4,16 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A NETFramework_NETCLRLoadingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics
type NETFramework_NETCLRLoadingCollector struct {
logger log.Logger
BytesinLoaderHeap *prometheus.Desc
Currentappdomains *prometheus.Desc
CurrentAssemblies *prometheus.Desc
@@ -23,9 +26,10 @@ type NETFramework_NETCLRLoadingCollector struct {
}
// newNETFramework_NETCLRLoadingCollector ...
func newNETFramework_NETCLRLoadingCollector() (Collector, error) {
func newNETFramework_NETCLRLoadingCollector(logger log.Logger) (Collector, error) {
const subsystem = "netframework_clrloading"
return &NETFramework_NETCLRLoadingCollector{
logger: log.With(logger, "collector", subsystem),
BytesinLoaderHeap: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "loader_heap_size_bytes"),
"Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.",
@@ -87,7 +91,7 @@ func newNETFramework_NETCLRLoadingCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting win32_perfrawdata_netframework_netclrloading metrics:", desc, err)
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrloading metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -116,7 +120,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLoading struct {
func (c *NETFramework_NETCLRLoadingCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRLoading
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -4,13 +4,16 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A NETFramework_NETCLRLocksAndThreadsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics
type NETFramework_NETCLRLocksAndThreadsCollector struct {
logger log.Logger
CurrentQueueLength *prometheus.Desc
NumberofcurrentlogicalThreads *prometheus.Desc
NumberofcurrentphysicalThreads *prometheus.Desc
@@ -21,9 +24,10 @@ type NETFramework_NETCLRLocksAndThreadsCollector struct {
}
// newNETFramework_NETCLRLocksAndThreadsCollector ...
func newNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {
func newNETFramework_NETCLRLocksAndThreadsCollector(logger log.Logger) (Collector, error) {
const subsystem = "netframework_clrlocksandthreads"
return &NETFramework_NETCLRLocksAndThreadsCollector{
logger: log.With(logger, "collector", subsystem),
CurrentQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "current_queue_length"),
"Displays the total number of threads that are currently waiting to acquire a managed lock in the application.",
@@ -73,7 +77,7 @@ func newNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics:", desc, err)
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -96,7 +100,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads struct {
func (c *NETFramework_NETCLRLocksAndThreadsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -4,13 +4,16 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A NETFramework_NETCLRMemoryCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics
type NETFramework_NETCLRMemoryCollector struct {
logger log.Logger
AllocatedBytes *prometheus.Desc
FinalizationSurvivors *prometheus.Desc
HeapSize *prometheus.Desc
@@ -29,9 +32,10 @@ type NETFramework_NETCLRMemoryCollector struct {
}
// newNETFramework_NETCLRMemoryCollector ...
func newNETFramework_NETCLRMemoryCollector() (Collector, error) {
func newNETFramework_NETCLRMemoryCollector(logger log.Logger) (Collector, error) {
const subsystem = "netframework_clrmemory"
return &NETFramework_NETCLRMemoryCollector{
logger: log.With(logger, "collector", subsystem),
AllocatedBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "allocated_bytes_total"),
"Displays the total number of bytes allocated on the garbage collection heap.",
@@ -111,7 +115,7 @@ func newNETFramework_NETCLRMemoryCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting win32_perfrawdata_netframework_netclrmemory metrics:", desc, err)
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrmemory metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -153,7 +157,7 @@ type Win32_PerfRawData_NETFramework_NETCLRMemory struct {
func (c *NETFramework_NETCLRMemoryCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRMemory
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -4,13 +4,16 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A NETFramework_NETCLRRemotingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics
type NETFramework_NETCLRRemotingCollector struct {
logger log.Logger
Channels *prometheus.Desc
ContextBoundClassesLoaded *prometheus.Desc
ContextBoundObjects *prometheus.Desc
@@ -20,9 +23,10 @@ type NETFramework_NETCLRRemotingCollector struct {
}
// newNETFramework_NETCLRRemotingCollector ...
func newNETFramework_NETCLRRemotingCollector() (Collector, error) {
func newNETFramework_NETCLRRemotingCollector(logger log.Logger) (Collector, error) {
const subsystem = "netframework_clrremoting"
return &NETFramework_NETCLRRemotingCollector{
logger: log.With(logger, "collector", subsystem),
Channels: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "channels_total"),
"Displays the total number of remoting channels registered across all application domains since application started.",
@@ -66,7 +70,7 @@ func newNETFramework_NETCLRRemotingCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting win32_perfrawdata_netframework_netclrremoting metrics:", desc, err)
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrremoting metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -86,7 +90,7 @@ type Win32_PerfRawData_NETFramework_NETCLRRemoting struct {
func (c *NETFramework_NETCLRRemotingCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -4,13 +4,16 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A NETFramework_NETCLRSecurityCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics
type NETFramework_NETCLRSecurityCollector struct {
logger log.Logger
NumberLinkTimeChecks *prometheus.Desc
TimeinRTchecks *prometheus.Desc
StackWalkDepth *prometheus.Desc
@@ -18,9 +21,10 @@ type NETFramework_NETCLRSecurityCollector struct {
}
// newNETFramework_NETCLRSecurityCollector ...
func newNETFramework_NETCLRSecurityCollector() (Collector, error) {
func newNETFramework_NETCLRSecurityCollector(logger log.Logger) (Collector, error) {
const subsystem = "netframework_clrsecurity"
return &NETFramework_NETCLRSecurityCollector{
logger: log.With(logger, "collector", subsystem),
NumberLinkTimeChecks: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "link_time_checks_total"),
"Displays the total number of link-time code access security checks since the application started.",
@@ -52,7 +56,7 @@ func newNETFramework_NETCLRSecurityCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics:", desc, err)
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -71,7 +75,7 @@ type Win32_PerfRawData_NETFramework_NETCLRSecurity struct {
func (c *NETFramework_NETCLRSecurityCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -9,16 +9,19 @@ import (
"strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/headers/netapi32"
"github.com/prometheus-community/windows_exporter/headers/psapi"
"github.com/prometheus-community/windows_exporter/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows/registry"
)
// A OSCollector is a Prometheus collector for WMI metrics
type OSCollector struct {
logger log.Logger
OSInformation *prometheus.Desc
PhysicalMemoryFreeBytes *prometheus.Desc
PagingFreeBytes *prometheus.Desc
@@ -41,10 +44,12 @@ type pagingFileCounter struct {
}
// newOSCollector ...
func newOSCollector() (Collector, error) {
func newOSCollector(logger log.Logger) (Collector, error) {
const subsystem = "os"
return &OSCollector{
logger: log.With(logger, "collector", subsystem),
OSInformation: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "info"),
"OperatingSystem.Caption, OperatingSystem.Version",
@@ -130,7 +135,7 @@ func newOSCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
log.Error("failed collecting os metrics:", desc, err)
level.Error(c.logger).Log("failed collecting os metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -200,7 +205,7 @@ func (c *OSCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (
file, err := os.Stat(fileString)
// For unknown reasons, Windows doesn't always create a page file. Continue collection rather than aborting.
if err != nil {
log.Debugf("Failed to read page file (reason: %s): %s\n", err, fileString)
level.Debug(c.logger).Log("msg", fmt.Sprintf("Failed to read page file (reason: %s): %s\n", err, fileString))
} else {
fsipf += float64(file.Size())
}
@@ -212,7 +217,7 @@ func (c *OSCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (
}
var pfc = make([]pagingFileCounter, 0)
if err := unmarshalObject(ctx.perfObjects["Paging File"], &pfc); err != nil {
if err := unmarshalObject(ctx.perfObjects["Paging File"], &pfc, c.logger); err != nil {
return nil, err
}
@@ -271,7 +276,7 @@ func (c *OSCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (
fsipf,
)
} else {
log.Debugln("Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
level.Debug(c.logger).Log("Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
}
ch <- prometheus.MustNewConstMetric(
c.VirtualMemoryFreeBytes,

View File

@@ -6,9 +6,10 @@ import (
"strconv"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
perflibCollector "github.com/leoluk/perflib_exporter/collector"
"github.com/leoluk/perflib_exporter/perflib"
"github.com/prometheus-community/windows_exporter/log"
)
var nametable = perflib.QueryNameTable("Counter 009") // Reads the names in English TODO: validate that the English names are always present
@@ -30,7 +31,7 @@ func getPerflibSnapshot(objNames string) (map[string]*perflib.PerfObject, error)
return indexed, nil
}
func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
func unmarshalObject(obj *perflib.PerfObject, vs interface{}, logger log.Logger) error {
if obj == nil {
return fmt.Errorf("counter not found")
}
@@ -81,7 +82,7 @@ func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
ctr, found := counters[tag]
if !found {
log.Debugf("missing counter %q, have %v", tag, counterMapKeys(counters))
level.Debug(logger).Log("msg", fmt.Sprintf("missing counter %q, have %v", tag, counterMapKeys(counters)))
continue
}
if !target.Field(i).CanSet() {

View File

@@ -4,6 +4,8 @@ import (
"reflect"
"testing"
"github.com/go-kit/log"
perflibCollector "github.com/leoluk/perflib_exporter/collector"
"github.com/leoluk/perflib_exporter/perflib"
)
@@ -112,7 +114,7 @@ func TestUnmarshalPerflib(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
output := make([]simple, 0)
err := unmarshalObject(c.obj, &output)
err := unmarshalObject(c.obj, &output, log.NewNopLogger())
if err != nil && !c.expectError {
t.Errorf("Did not expect error, got %q", err)
}

View File

@@ -11,7 +11,8 @@ import (
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
@@ -36,6 +37,8 @@ var (
)
type processCollector struct {
logger log.Logger
StartTime *prometheus.Desc
CPUTimeTotal *prometheus.Desc
HandleCount *prometheus.Desc
@@ -85,12 +88,13 @@ func newProcessCollectorFlags(app *kingpin.Application) {
}
// NewProcessCollector ...
func newProcessCollector() (Collector, error) {
func newProcessCollector(logger log.Logger) (Collector, error) {
const subsystem = "process"
logger = log.With(logger, "collector", subsystem)
if *processOldExclude != "" {
if !processExcludeSet {
log.Warnln("msg", "--collector.process.blacklist is DEPRECATED and will be removed in a future release, use --collector.process.exclude")
level.Warn(logger).Log("msg", "--collector.process.blacklist is DEPRECATED and will be removed in a future release, use --collector.process.exclude")
*processExclude = *processOldExclude
} else {
return nil, errors.New("--collector.process.blacklist and --collector.process.exclude are mutually exclusive")
@@ -98,7 +102,7 @@ func newProcessCollector() (Collector, error) {
}
if *processOldInclude != "" {
if !processIncludeSet {
log.Warnln("msg", "--collector.process.whitelist is DEPRECATED and will be removed in a future release, use --collector.process.include")
level.Warn(logger).Log("msg", "--collector.process.whitelist is DEPRECATED and will be removed in a future release, use --collector.process.include")
*processInclude = *processOldInclude
} else {
return nil, errors.New("--collector.process.whitelist and --collector.process.include are mutually exclusive")
@@ -106,10 +110,11 @@ func newProcessCollector() (Collector, error) {
}
if *processInclude == ".*" && *processExclude == "" {
log.Warn("No filters specified for process collector. This will generate a very large number of metrics!")
level.Warn(logger).Log("msg", "No filters specified for process collector. This will generate a very large number of metrics!")
}
return &processCollector{
logger: logger,
StartTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "start_time"),
"Time of process start.",
@@ -244,15 +249,15 @@ type WorkerProcess struct {
func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
data := make([]perflibProcess, 0)
err := unmarshalObject(ctx.perfObjects["Process"], &data)
err := unmarshalObject(ctx.perfObjects["Process"], &data, c.logger)
if err != nil {
return err
}
var dst_wp []WorkerProcess
q_wp := queryAll(&dst_wp)
q_wp := queryAll(&dst_wp, c.logger)
if err := wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration"); err != nil {
log.Debugf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err)
level.Debug(c.logger).Log("msg", fmt.Sprintf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err))
}
for _, process := range data {

View File

@@ -8,7 +8,8 @@ import (
"sync"
"time"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@@ -45,14 +46,16 @@ var (
type Prometheus struct {
maxScrapeDuration time.Duration
collectors map[string]Collector
logger log.Logger
}
// NewPrometheus returns a new Prometheus where the set of collectors must
// return metrics within the given timeout.
func NewPrometheus(timeout time.Duration, cs map[string]Collector) *Prometheus {
func NewPrometheus(timeout time.Duration, cs map[string]Collector, logger log.Logger) *Prometheus {
return &Prometheus{
maxScrapeDuration: timeout,
collectors: cs,
logger: logger,
}
}
@@ -113,7 +116,7 @@ func (coll *Prometheus) Collect(ch chan<- prometheus.Metric) {
for name, c := range coll.collectors {
go func(name string, c Collector) {
defer wg.Done()
outcome := execute(name, c, scrapeContext, metricsBuffer)
outcome := execute(name, c, scrapeContext, metricsBuffer, coll.logger)
l.Lock()
if !finished {
collectorOutcomes[name] = outcome
@@ -164,13 +167,13 @@ func (coll *Prometheus) Collect(ch chan<- prometheus.Metric) {
}
if len(remainingCollectorNames) > 0 {
log.Warn("Collection timed out, still waiting for ", remainingCollectorNames)
level.Warn(coll.logger).Log("msg", fmt.Sprintf("Collection timed out, still waiting for %v", remainingCollectorNames))
}
l.Unlock()
}
func execute(name string, c Collector, ctx *ScrapeContext, ch chan<- prometheus.Metric) collectorOutcome {
func execute(name string, c Collector, ctx *ScrapeContext, ch chan<- prometheus.Metric, logger log.Logger) collectorOutcome {
t := time.Now()
err := c.Collect(ctx, ch)
duration := time.Since(t).Seconds()
@@ -182,9 +185,9 @@ func execute(name string, c Collector, ctx *ScrapeContext, ch chan<- prometheus.
)
if err != nil {
log.Errorf("collector %s failed after %fs: %s", name, duration, err)
level.Error(logger).Log("msg", fmt.Sprintf("collector %s failed after %fs", name, duration), "err", err)
return failed
}
log.Debugf("collector %s succeeded after %fs.", name, duration)
level.Debug(logger).Log("msg", fmt.Sprintf("collector %s succeeded after %fs.", name, duration))
return success
}
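For completeness, a hedged sketch of the caller side of the new NewPrometheus signature. The exporter's actual wiring lives in another file of this commit, so the import path, listen address, timeout and the way the enabled-collector map is assembled are assumptions, and registration relies on *Prometheus also implementing Describe (defined elsewhere in this file).

package main

import (
	"net/http"
	"os"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/prometheus-community/windows_exporter/collector" // assumed import path
)

func main() {
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	collector.RegisterCollectors(logger)

	// The enabled set is assembled elsewhere in the exporter; left empty here on purpose.
	enabled := map[string]collector.Collector{}

	// Collectors must finish within the timeout; the same logger is reused for the
	// timeout warning and the per-collector outcome logs shown above.
	coll := collector.NewPrometheus(10*time.Second, enabled, logger)

	reg := prometheus.NewRegistry()
	reg.MustRegister(coll)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":9182", nil)
}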

View File

@@ -6,7 +6,8 @@ package collector
import (
"strings"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@@ -16,6 +17,8 @@ import (
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxgraphics/
type RemoteFxCollector struct {
logger log.Logger
// net
BaseTCPRTT *prometheus.Desc
BaseUDPRTT *prometheus.Desc
@@ -39,9 +42,11 @@ type RemoteFxCollector struct {
}
// newRemoteFx ...
func newRemoteFx() (Collector, error) {
func newRemoteFx(logger log.Logger) (Collector, error) {
const subsystem = "remote_fx"
return &RemoteFxCollector{
logger: log.With(logger, "collector", subsystem),
// net
BaseTCPRTT: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_base_tcp_rtt_seconds"),
@@ -154,11 +159,11 @@ func newRemoteFx() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *RemoteFxCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectRemoteFXNetworkCount(ctx, ch); err != nil {
log.Error("failed collecting terminal services session count metrics:", desc, err)
level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectRemoteFXGraphicsCounters(ctx, ch); err != nil {
log.Error("failed collecting terminal services session count metrics:", desc, err)
level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -180,7 +185,7 @@ type perflibRemoteFxNetwork struct {
func (c *RemoteFxCollector) collectRemoteFXNetworkCount(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibRemoteFxNetwork, 0)
err := unmarshalObject(ctx.perfObjects["RemoteFX Network"], &dst)
err := unmarshalObject(ctx.perfObjects["RemoteFX Network"], &dst, c.logger)
if err != nil {
return nil, err
}
@@ -270,7 +275,7 @@ type perflibRemoteFxGraphics struct {
func (c *RemoteFxCollector) collectRemoteFXGraphicsCounters(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibRemoteFxGraphics, 0)
err := unmarshalObject(ctx.perfObjects["RemoteFX Graphics"], &dst)
err := unmarshalObject(ctx.perfObjects["RemoteFX Graphics"], &dst, c.logger)
if err != nil {
return nil, err
}

View File

@@ -11,9 +11,10 @@ import (
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
ole "github.com/go-ole/go-ole"
"github.com/go-ole/go-ole/oleutil"
"github.com/prometheus-community/windows_exporter/log"
"github.com/prometheus/client_golang/prometheus"
)
@@ -37,6 +38,8 @@ var (
)
type ScheduledTaskCollector struct {
logger log.Logger
LastResult *prometheus.Desc
MissedRuns *prometheus.Desc
State *prometheus.Desc
@@ -101,10 +104,13 @@ func newScheduledTaskFlags(app *kingpin.Application) {
}
// newScheduledTask ...
func newScheduledTask() (Collector, error) {
func newScheduledTask(logger log.Logger) (Collector, error) {
const subsystem = "scheduled_task"
logger = log.With(logger, "collector", subsystem)
if *taskOldExclude != "" {
if !taskExcludeSet {
log.Warnln("msg", "--collector.scheduled_task.blacklist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.exclude")
level.Warn(logger).Log("msg", "--collector.scheduled_task.blacklist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.exclude")
*taskExclude = *taskOldExclude
} else {
return nil, errors.New("--collector.scheduled_task.blacklist and --collector.scheduled_task.exclude are mutually exclusive")
@@ -112,15 +118,13 @@ func newScheduledTask() (Collector, error) {
}
if *taskOldInclude != "" {
if !taskIncludeSet {
log.Warnln("msg", "--collector.scheduled_task.whitelist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.include")
level.Warn(logger).Log("msg", "--collector.scheduled_task.whitelist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.include")
*taskInclude = *taskOldInclude
} else {
return nil, errors.New("--collector.scheduled_task.whitelist and --collector.scheduled_task.include are mutually exclusive")
}
}
const subsystem = "scheduled_task"
runtime.LockOSThread()
defer runtime.UnlockOSThread()
@@ -134,6 +138,7 @@ func newScheduledTask() (Collector, error) {
defer ole.CoUninitialize()
return &ScheduledTaskCollector{
logger: logger,
LastResult: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "last_result"),
"The result that was returned the last time the registered task was run",
@@ -162,7 +167,7 @@ func newScheduledTask() (Collector, error) {
func (c *ScheduledTaskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting user metrics:", desc, err)
level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
return err
}

View File

@@ -9,7 +9,8 @@ import (
"syscall"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows"
@@ -28,6 +29,8 @@ var (
// A serviceCollector is a Prometheus collector for WMI Win32_Service metrics
type serviceCollector struct {
logger log.Logger
Information *prometheus.Desc
State *prometheus.Desc
StartMode *prometheus.Desc
@@ -49,17 +52,20 @@ func newServiceCollectorFlags(app *kingpin.Application) {
}
// newserviceCollector ...
func newserviceCollector() (Collector, error) {
func newserviceCollector(logger log.Logger) (Collector, error) {
const subsystem = "service"
logger = log.With(logger, "collector", subsystem)
if *serviceWhereClause == "" {
log.Warn("No where-clause specified for service collector. This will generate a very large number of metrics!")
level.Warn(logger).Log("msg", "No where-clause specified for service collector. This will generate a very large number of metrics!")
}
if *useAPI {
log.Warn("API collection is enabled.")
level.Warn(logger).Log("msg", "API collection is enabled.")
}
return &serviceCollector{
logger: logger,
Information: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "info"),
"A metric with a constant '1' value labeled with service information",
@@ -93,12 +99,12 @@ func newserviceCollector() (Collector, error) {
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if *useAPI {
if err := c.collectAPI(ch); err != nil {
log.Error("failed collecting API service metrics:", err)
level.Error(c.logger).Log("msg", "failed collecting API service metrics:", "err", err)
return err
}
} else {
if err := c.collectWMI(ch); err != nil {
log.Error("failed collecting WMI service metrics:", err)
level.Error(c.logger).Log("msg", "failed collecting WMI service metrics:", "err", err)
return err
}
}
@@ -169,7 +175,7 @@ var (
func (c *serviceCollector) collectWMI(ch chan<- prometheus.Metric) error {
var dst []Win32_Service
q := queryAllWhere(&dst, c.queryWhereClause)
q := queryAllWhere(&dst, c.queryWhereClause, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return err
}
@@ -253,14 +259,14 @@ func (c *serviceCollector) collectAPI(ch chan<- prometheus.Metric) error {
// Get UTF16 service name.
serviceName, err := syscall.UTF16PtrFromString(service)
if err != nil {
log.Warnf("Service %s get name error: %#v", service, err)
level.Warn(c.logger).Log("msg", fmt.Sprintf("Service %s get name error: %#v", service, err))
continue
}
// Open connection for service handler.
serviceHandle, err := windows.OpenService(svcmgrConnection.Handle, serviceName, windows.GENERIC_READ)
if err != nil {
log.Warnf("Open service %s error: %#v", service, err)
level.Warn(c.logger).Log("msg", fmt.Sprintf("Open service %s error: %#v", service, err))
continue
}
@@ -271,14 +277,14 @@ func (c *serviceCollector) collectAPI(ch chan<- prometheus.Metric) error {
// Get Service Configuration.
serviceConfig, err := serviceManager.Config()
if err != nil {
log.Warnf("Get ervice %s config error: %#v", service, err)
level.Warn(c.logger).Log("msg", fmt.Sprintf("Get ervice %s config error: %#v", service, err))
continue
}
// Get Service Current Status.
serviceStatus, err := serviceManager.Query()
if err != nil {
log.Warnf("Get service %s status error: %#v", service, err)
level.Warn(c.logger).Log("msg", fmt.Sprintf("Get service %s status error: %#v", service, err))
continue
}

View File

@@ -6,10 +6,12 @@ package collector
import (
"errors"
"fmt"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/prometheus/client_golang/prometheus"
"regexp"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
const (
@@ -32,6 +34,8 @@ var (
)
type SMTPCollector struct {
logger log.Logger
BadmailedMessagesBadPickupFileTotal *prometheus.Desc
BadmailedMessagesGeneralFailureTotal *prometheus.Desc
BadmailedMessagesHopCountExceededTotal *prometheus.Desc
@@ -106,12 +110,15 @@ func newSMTPCollectorFlags(app *kingpin.Application) {
).Hidden().String()
}
func newSMTPCollector() (Collector, error) {
log.Info("smtp collector is in an experimental state! Metrics for this collector have not been tested.")
func newSMTPCollector(logger log.Logger) (Collector, error) {
const subsystem = "smtp"
logger = log.With(logger, "collector", subsystem)
level.Info(logger).Log("msg", "smtp collector is in an experimental state! Metrics for this collector have not been tested.")
if *serverOldExclude != "" {
if !serverExcludeSet {
log.Warnln("msg", "--collector.smtp.server-blacklist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-exclude")
level.Warn(logger).Log("msg", "--collector.smtp.server-blacklist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-exclude")
*serverExclude = *serverOldExclude
} else {
return nil, errors.New("--collector.smtp.server-blacklist and --collector.smtp.server-exclude are mutually exclusive")
@@ -119,15 +126,15 @@ func newSMTPCollector() (Collector, error) {
}
if *serverOldInclude != "" {
if !serverIncludeSet {
log.Warnln("msg", "--collector.smtp.server-whitelist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-include")
level.Warn(logger).Log("msg", "--collector.smtp.server-whitelist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-include")
*serverInclude = *serverOldInclude
} else {
return nil, errors.New("--collector.smtp.server-whitelist and --collector.smtp.server-include are mutually exclusive")
}
}
const subsystem = "smtp"
return &SMTPCollector{
logger: logger,
BadmailedMessagesBadPickupFileTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_bad_pickup_file_total"),
"Total number of malformed pickup messages sent to badmail",
@@ -390,7 +397,7 @@ func newSMTPCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *SMTPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
log.Error("failed collecting smtp metrics:", desc, err)
level.Error(c.logger).Log("failed collecting smtp metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -446,7 +453,7 @@ type PerflibSMTPServer struct {
func (c *SMTPCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []PerflibSMTPServer
if err := unmarshalObject(ctx.perfObjects["SMTP Server"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["SMTP Server"], &dst, c.logger); err != nil {
return nil, err
}

View File

@@ -4,12 +4,15 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
// A SystemCollector is a Prometheus collector for WMI metrics
type SystemCollector struct {
logger log.Logger
ContextSwitchesTotal *prometheus.Desc
ExceptionDispatchesTotal *prometheus.Desc
ProcessorQueueLength *prometheus.Desc
@@ -19,10 +22,11 @@ type SystemCollector struct {
}
// newSystemCollector ...
func newSystemCollector() (Collector, error) {
func newSystemCollector(logger log.Logger) (Collector, error) {
const subsystem = "system"
return &SystemCollector{
logger: log.With(logger, "collector", subsystem),
ContextSwitchesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "context_switches_total"),
"Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)",
@@ -66,7 +70,7 @@ func newSystemCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
log.Error("failed collecting system metrics:", desc, err)
level.Error(c.logger).Log("failed collecting system metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -85,7 +89,7 @@ type system struct {
func (c *SystemCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []system
if err := unmarshalObject(ctx.perfObjects["System"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["System"], &dst, c.logger); err != nil {
return nil, err
}

View File

@@ -4,12 +4,15 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
// A TCPCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_TCPv{4,6} metrics
type TCPCollector struct {
logger log.Logger
ConnectionFailures *prometheus.Desc
ConnectionsActive *prometheus.Desc
ConnectionsEstablished *prometheus.Desc
@@ -22,10 +25,11 @@ type TCPCollector struct {
}
// newTCPCollector ...
func newTCPCollector() (Collector, error) {
func newTCPCollector(logger log.Logger) (Collector, error) {
const subsystem = "tcp"
return &TCPCollector{
logger: log.With(logger, "collector", subsystem),
ConnectionFailures: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_failures_total"),
"(TCP.ConnectionFailures)",
@@ -87,7 +91,7 @@ func newTCPCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *TCPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
log.Error("failed collecting tcp metrics:", desc, err)
level.Error(c.logger).Log("failed collecting tcp metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -169,7 +173,7 @@ func (c *TCPCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric)
var dst []tcp
// TCPv4 counters
if err := unmarshalObject(ctx.perfObjects["TCPv4"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["TCPv4"], &dst, c.logger); err != nil {
return nil, err
}
if len(dst) != 0 {
@@ -177,7 +181,7 @@ func (c *TCPCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric)
}
// TCPv6 counters
if err := unmarshalObject(ctx.perfObjects["TCPv6"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["TCPv6"], &dst, c.logger); err != nil {
return nil, err
}
if len(dst) != 0 {

View File

@@ -6,7 +6,8 @@ package collector
import (
"errors"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
@@ -19,6 +20,8 @@ import (
// win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics
type teradiciPcoipCollector struct {
logger log.Logger
AudioBytesReceived *prometheus.Desc
AudioBytesSent *prometheus.Desc
AudioRXBWkbitPersec *prometheus.Desc
@@ -63,9 +66,10 @@ type teradiciPcoipCollector struct {
}
// newTeradiciPcoipCollector constructs a new teradiciPcoipCollector
func newTeradiciPcoipCollector() (Collector, error) {
func newTeradiciPcoipCollector(logger log.Logger) (Collector, error) {
const subsystem = "teradici_pcoip"
return &teradiciPcoipCollector{
logger: log.With(logger, "collector", subsystem),
AudioBytesReceived: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "audio_bytes_received_total"),
"(AudioBytesReceived)",
@@ -299,23 +303,23 @@ func newTeradiciPcoipCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *teradiciPcoipCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectAudio(ch); err != nil {
log.Error("failed collecting teradici session audio metrics:", desc, err)
level.Error(c.logger).Log("failed collecting teradici session audio metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectGeneral(ch); err != nil {
log.Error("failed collecting teradici session general metrics:", desc, err)
level.Error(c.logger).Log("failed collecting teradici session general metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectImaging(ch); err != nil {
log.Error("failed collecting teradici session imaging metrics:", desc, err)
level.Error(c.logger).Log("failed collecting teradici session imaging metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectNetwork(ch); err != nil {
log.Error("failed collecting teradici session network metrics:", desc, err)
level.Error(c.logger).Log("failed collecting teradici session network metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectUsb(ch); err != nil {
log.Error("failed collecting teradici session USB metrics:", desc, err)
level.Error(c.logger).Log("failed collecting teradici session USB metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -375,7 +379,7 @@ type win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics struct {
func (c *teradiciPcoipCollector) collectAudio(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionAudioStatistics
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -418,7 +422,7 @@ func (c *teradiciPcoipCollector) collectAudio(ch chan<- prometheus.Metric) (*pro
func (c *teradiciPcoipCollector) collectGeneral(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionGeneralStatistics
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -473,7 +477,7 @@ func (c *teradiciPcoipCollector) collectGeneral(ch chan<- prometheus.Metric) (*p
func (c *teradiciPcoipCollector) collectImaging(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionImagingStatistics
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -552,7 +556,7 @@ func (c *teradiciPcoipCollector) collectImaging(ch chan<- prometheus.Metric) (*p
func (c *teradiciPcoipCollector) collectNetwork(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -625,7 +629,7 @@ func (c *teradiciPcoipCollector) collectNetwork(ch chan<- prometheus.Metric) (*p
func (c *teradiciPcoipCollector) collectUsb(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -7,24 +7,21 @@ import (
"errors"
"strings"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
const ConnectionBrokerFeatureID uint32 = 133
var (
connectionBrokerEnabled = isConnectionBrokerServer()
)
type Win32_ServerFeature struct {
ID uint32
}
func isConnectionBrokerServer() bool {
func isConnectionBrokerServer(logger log.Logger) bool {
var dst []Win32_ServerFeature
q := queryAll(&dst)
q := queryAll(&dst, logger)
if err := wmi.Query(q, &dst); err != nil {
return false
}
@@ -33,7 +30,7 @@ func isConnectionBrokerServer() bool {
return true
}
}
log.Debug("host is not a connection broker skipping Connection Broker performance metrics.")
level.Debug(logger).Log("msg", "host is not a connection broker skipping Connection Broker performance metrics.")
return false
}
@@ -42,6 +39,9 @@ func isConnectionBrokerServer() bool {
// https://docs.microsoft.com/en-us/previous-versions/aa394344(v%3Dvs.85)
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_localsessionmanager_terminalservices/
type TerminalServicesCollector struct {
logger log.Logger
connectionBrokerEnabled bool
LocalSessionCount *prometheus.Desc
ConnectionBrokerPerformance *prometheus.Desc
HandleCount *prometheus.Desc
@@ -62,9 +62,13 @@ type TerminalServicesCollector struct {
}
// newTerminalServicesCollector ...
func newTerminalServicesCollector() (Collector, error) {
func newTerminalServicesCollector(logger log.Logger) (Collector, error) {
const subsystem = "terminal_services"
logger = log.With(logger, "collector", subsystem)
return &TerminalServicesCollector{
logger: logger,
connectionBrokerEnabled: isConnectionBrokerServer(logger),
LocalSessionCount: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "local_session_count"),
"Number of Terminal Services sessions",
@@ -174,18 +178,18 @@ func newTerminalServicesCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *TerminalServicesCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectTSSessionCount(ctx, ch); err != nil {
log.Error("failed collecting terminal services session count metrics:", desc, err)
level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectTSSessionCounters(ctx, ch); err != nil {
log.Error("failed collecting terminal services session count metrics:", desc, err)
level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
// only collect CollectionBrokerPerformance if host is a Connection Broker
if connectionBrokerEnabled {
if c.connectionBrokerEnabled {
if desc, err := c.collectCollectionBrokerPerformanceCounter(ctx, ch); err != nil {
log.Error("failed collecting Connection Broker performance metrics:", desc, err)
level.Error(c.logger).Log("failed collecting Connection Broker performance metrics", "desc", desc, "err", err)
return err
}
}
@@ -200,7 +204,7 @@ type perflibTerminalServices struct {
func (c *TerminalServicesCollector) collectTSSessionCount(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibTerminalServices, 0)
err := unmarshalObject(ctx.perfObjects["Terminal Services"], &dst)
err := unmarshalObject(ctx.perfObjects["Terminal Services"], &dst, c.logger)
if err != nil {
return nil, err
}
@@ -253,7 +257,7 @@ type perflibTerminalServicesSession struct {
func (c *TerminalServicesCollector) collectTSSessionCounters(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibTerminalServicesSession, 0)
err := unmarshalObject(ctx.perfObjects["Terminal Services Session"], &dst)
err := unmarshalObject(ctx.perfObjects["Terminal Services Session"], &dst, c.logger)
if err != nil {
return nil, err
}
@@ -367,7 +371,7 @@ type perflibRemoteDesktopConnectionBrokerCounterset struct {
func (c *TerminalServicesCollector) collectCollectionBrokerPerformanceCounter(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibRemoteDesktopConnectionBrokerCounterset, 0)
err := unmarshalObject(ctx.perfObjects["Remote Desktop Connection Broker Counterset"], &dst)
err := unmarshalObject(ctx.perfObjects["Remote Desktop Connection Broker Counterset"], &dst, c.logger)
if err != nil {
return nil, err
}

View File

@@ -27,9 +27,10 @@ import (
"strings"
"time"
kingpin "github.com/alecthomas/kingpin/v2"
"github.com/alecthomas/kingpin/v2"
"github.com/dimchansky/utfbom"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
@@ -51,6 +52,8 @@ var (
)
type textFileCollector struct {
logger log.Logger
path string
// Only set for testing to get predictable output.
mtime *float64
@@ -66,9 +69,11 @@ func newTextFileCollectorFlags(app *kingpin.Application) {
// newTextFileCollector returns a new Collector exposing metrics read from files
// in the given textfile directory.
func newTextFileCollector() (Collector, error) {
func newTextFileCollector(logger log.Logger) (Collector, error) {
const subsystem = "textfile"
return &textFileCollector{
path: *textFileDirectory,
logger: log.With(logger, "collector", subsystem),
path: *textFileDirectory,
}, nil
}
@@ -97,7 +102,7 @@ func duplicateMetricEntry(metricFamilies []*dto.MetricFamily) bool {
return false
}
func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) {
func (c *textFileCollector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) {
var valType prometheus.ValueType
var val float64
@@ -113,7 +118,7 @@ func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Me
for _, metric := range metricFamily.Metric {
if metric.TimestampMs != nil {
log.Warnf("Ignoring unsupported custom timestamp on textfile collector metric %v", metric)
level.Warn(c.logger).Log("msg", fmt.Sprintf("Ignoring unsupported custom timestamp on textfile collector metric %v", metric))
}
labels := metric.GetLabel()
@@ -183,7 +188,7 @@ func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Me
buckets, values...,
)
default:
log.Errorf("unknown metric type for file")
level.Error(c.logger).Log("msg", "unknown metric type for file")
continue
}
if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED {
@@ -251,7 +256,7 @@ func (c *textFileCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Met
// Iterate over files and accumulate their metrics.
files, err := ioutil.ReadDir(c.path)
if err != nil && c.path != "" {
log.Errorf("Error reading textfile collector directory %q: %s", c.path, err)
level.Error(c.logger).Log("msg", fmt.Sprintf("Error reading textfile collector directory %q", c.path), "err", err)
error = 1.0
}
@@ -265,27 +270,27 @@ fileLoop:
continue
}
path := filepath.Join(c.path, f.Name())
log.Debugf("Processing file %q", path)
level.Debug(c.logger).Log("msg", fmt.Sprintf("Processing file %q", path))
file, err := os.Open(path)
if err != nil {
log.Errorf("Error opening %q: %v", path, err)
level.Error(c.logger).Log("msg", fmt.Sprintf("Error opening %q: %v", path, err))
error = 1.0
continue
}
var parser expfmt.TextParser
r, encoding := utfbom.Skip(carriageReturnFilteringReader{r: file})
if err = checkBOM(encoding); err != nil {
log.Errorf("Invalid file encoding detected in %s: %s - file must be UTF8", path, err.Error())
level.Error(c.logger).Log("msg", fmt.Sprintf("Invalid file encoding detected in %s: %s - file must be UTF8", path, err.Error()))
error = 1.0
continue
}
parsedFamilies, err := parser.TextToMetricFamilies(r)
closeErr := file.Close()
if closeErr != nil {
log.Warnf("Error closing file: %v", err)
level.Warn(c.logger).Log("msg", fmt.Sprintf("Error closing file"), "err", err)
}
if err != nil {
log.Errorf("Error parsing %q: %v", path, err)
level.Error(c.logger).Log("msg", fmt.Sprintf("Error parsing %q: %v", path, err))
error = 1.0
continue
}
@@ -297,7 +302,7 @@ fileLoop:
families_array = append(families_array, mf)
for _, m := range mf.Metric {
if m.TimestampMs != nil {
log.Errorf("Textfile %q contains unsupported client-side timestamps, skipping entire file", path)
level.Error(c.logger).Log("msg", fmt.Sprintf("Textfile %q contains unsupported client-side timestamps, skipping entire file", path))
error = 1.0
continue fileLoop
}
@@ -310,7 +315,7 @@ fileLoop:
// If duplicate metrics are detected in a *single* file, skip processing of file metrics
if duplicateMetricEntry(families_array) {
log.Errorf("Duplicate metrics detected in file %s. Skipping file processing.", f.Name())
level.Error(c.logger).Log("msg", fmt.Sprintf("Duplicate metrics detected in file %s. Skipping file processing.", f.Name()))
error = 1.0
continue
}
@@ -326,11 +331,11 @@ fileLoop:
// If duplicates are detected across *multiple* files, return error.
if duplicateMetricEntry(metricFamilies) {
log.Errorf("Duplicate metrics detected across multiple files")
level.Error(c.logger).Log("msg", "Duplicate metrics detected across multiple files")
error = 1.0
} else {
for _, mf := range metricFamilies {
convertMetricFamily(mf, ch)
c.convertMetricFamily(mf, ch)
}
}

View File

@@ -1,11 +1,12 @@
package collector
import (
"github.com/dimchansky/utfbom"
"io/ioutil"
"strings"
"testing"
"github.com/dimchansky/utfbom"
dto "github.com/prometheus/client_model/go"
)

View File

@@ -3,22 +3,26 @@ package collector
import (
"errors"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A thermalZoneCollector is a Prometheus collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics
type thermalZoneCollector struct {
logger log.Logger
PercentPassiveLimit *prometheus.Desc
Temperature *prometheus.Desc
ThrottleReasons *prometheus.Desc
}
// newThermalZoneCollector ...
func newThermalZoneCollector() (Collector, error) {
func newThermalZoneCollector(logger log.Logger) (Collector, error) {
const subsystem = "thermalzone"
return &thermalZoneCollector{
logger: log.With(logger, "collector", subsystem),
Temperature: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "temperature_celsius"),
"(Temperature)",
@@ -50,7 +54,7 @@ func newThermalZoneCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *thermalZoneCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting thermalzone metrics:", desc, err)
level.Error(c.logger).Log("failed collecting thermalzone metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -68,7 +72,7 @@ type Win32_PerfRawData_Counters_ThermalZoneInformation struct {
func (c *thermalZoneCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_Counters_ThermalZoneInformation
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -6,12 +6,15 @@ package collector
import (
"errors"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
// TimeCollector is a Prometheus collector for Perflib counter metrics
type TimeCollector struct {
logger log.Logger
ClockFrequencyAdjustmentPPBTotal *prometheus.Desc
ComputedTimeOffset *prometheus.Desc
NTPClientTimeSourceCount *prometheus.Desc
@@ -20,14 +23,16 @@ type TimeCollector struct {
NTPServerOutgoingResponsesTotal *prometheus.Desc
}
func newTimeCollector() (Collector, error) {
if getWindowsVersion() <= 6.1 {
return nil, errors.New("Windows version older than Server 2016 detected. The time collector will not run and should be disabled via CLI flags or configuration file")
}
func newTimeCollector(logger log.Logger) (Collector, error) {
const subsystem = "time"
logger = log.With(logger, "collector", subsystem)
if getWindowsVersion(logger) < 100 {
return nil, errors.New("Windows version older than Server 2016 detected. The time collector will not run and should be disabled via CLI flags or configuration file")
}
return &TimeCollector{
logger: logger,
ClockFrequencyAdjustmentPPBTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "clock_frequency_adjustment_ppb_total"),
"Total adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units.",
@@ -71,7 +76,7 @@ func newTimeCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *TimeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
log.Error("failed collecting time metrics:", desc, err)
level.Error(c.logger).Log("failed collecting time metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -89,7 +94,7 @@ type windowsTime struct {
func (c *TimeCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []windowsTime // Single-instance class, array is required but will have single entry.
if err := unmarshalObject(ctx.perfObjects["Windows Time Service"], &dst); err != nil {
if err := unmarshalObject(ctx.perfObjects["Windows Time Service"], &dst, c.logger); err != nil {
return nil, err
}

View File

@@ -6,13 +6,16 @@ package collector
import (
"errors"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
// A VmwareCollector is a Prometheus collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics
type VmwareCollector struct {
logger log.Logger
MemActive *prometheus.Desc
MemBallooned *prometheus.Desc
MemLimit *prometheus.Desc
@@ -36,9 +39,10 @@ type VmwareCollector struct {
}
// newVmwareCollector constructs a new VmwareCollector
func newVmwareCollector() (Collector, error) {
func newVmwareCollector(logger log.Logger) (Collector, error) {
const subsystem = "vmware"
return &VmwareCollector{
logger: log.With(logger, "collector", subsystem),
MemActive: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_active_bytes"),
"(MemActiveMB)",
@@ -161,11 +165,11 @@ func newVmwareCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *VmwareCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectMem(ch); err != nil {
log.Error("failed collecting vmware memory metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware memory metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectCpu(ch); err != nil {
log.Error("failed collecting vmware cpu metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware cpu metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -198,7 +202,7 @@ type Win32_PerfRawData_vmGuestLib_VCPU struct {
func (c *VmwareCollector) collectMem(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_vmGuestLib_VMem
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -287,7 +291,7 @@ func mbToBytes(mb uint64) float64 {
func (c *VmwareCollector) collectCpu(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_vmGuestLib_VCPU
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -4,7 +4,8 @@
package collector
import (
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
@@ -24,6 +25,8 @@ import (
// win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters
type vmwareBlastCollector struct {
logger log.Logger
AudioReceivedBytes *prometheus.Desc
AudioReceivedPackets *prometheus.Desc
AudioTransmittedBytes *prometheus.Desc
@@ -107,9 +110,10 @@ type vmwareBlastCollector struct {
}
// newVmwareBlastCollector constructs a new vmwareBlastCollector
func newVmwareBlastCollector() (Collector, error) {
func newVmwareBlastCollector(logger log.Logger) (Collector, error) {
const subsystem = "vmware_blast"
return &vmwareBlastCollector{
logger: log.With(logger, "collector", subsystem),
AudioReceivedBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "audio_received_bytes_total"),
"(AudioReceivedBytes)",
@@ -542,51 +546,51 @@ func newVmwareBlastCollector() (Collector, error) {
// to the provided prometheus Metric channel.
func (c *vmwareBlastCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectAudio(ch); err != nil {
log.Error("failed collecting vmware blast audio metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast audio metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectCdr(ch); err != nil {
log.Error("failed collecting vmware blast CDR metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast CDR metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectClipboard(ch); err != nil {
log.Error("failed collecting vmware blast clipboard metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast clipboard metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectHtml5Mmr(ch); err != nil {
log.Error("failed collecting vmware blast HTML5 MMR metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast HTML5 MMR metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectImaging(ch); err != nil {
log.Error("failed collecting vmware blast imaging metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast imaging metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectRtav(ch); err != nil {
log.Error("failed collecting vmware blast RTAV metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast RTAV metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectSerialPortandScanner(ch); err != nil {
log.Error("failed collecting vmware blast serial port and scanner metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast serial port and scanner metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectSession(ch); err != nil {
log.Error("failed collecting vmware blast metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectSkypeforBusinessControl(ch); err != nil {
log.Error("failed collecting vmware blast skype for business control metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast skype for business control metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectThinPrint(ch); err != nil {
log.Error("failed collecting vmware blast thin print metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast thin print metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectUsb(ch); err != nil {
log.Error("failed collecting vmware blast USB metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast USB metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectWindowsMediaMmr(ch); err != nil {
log.Error("failed collecting vmware blast windows media MMR metrics:", desc, err)
level.Error(c.logger).Log("failed collecting vmware blast windows media MMR metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -699,7 +703,7 @@ type win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters struct {
func (c *vmwareBlastCollector) collectAudio(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastAudioCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -738,7 +742,7 @@ func (c *vmwareBlastCollector) collectAudio(ch chan<- prometheus.Metric) (*prome
func (c *vmwareBlastCollector) collectCdr(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastCDRCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -777,7 +781,7 @@ func (c *vmwareBlastCollector) collectCdr(ch chan<- prometheus.Metric) (*prometh
func (c *vmwareBlastCollector) collectClipboard(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastClipboardCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -816,7 +820,7 @@ func (c *vmwareBlastCollector) collectClipboard(ch chan<- prometheus.Metric) (*p
func (c *vmwareBlastCollector) collectHtml5Mmr(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastHTML5MMRcounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -855,7 +859,7 @@ func (c *vmwareBlastCollector) collectHtml5Mmr(ch chan<- prometheus.Metric) (*pr
func (c *vmwareBlastCollector) collectImaging(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastImagingCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -942,7 +946,7 @@ func (c *vmwareBlastCollector) collectImaging(ch chan<- prometheus.Metric) (*pro
func (c *vmwareBlastCollector) collectRtav(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastRTAVCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -981,7 +985,7 @@ func (c *vmwareBlastCollector) collectRtav(ch chan<- prometheus.Metric) (*promet
func (c *vmwareBlastCollector) collectSerialPortandScanner(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastSerialPortandScannerCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1020,7 +1024,7 @@ func (c *vmwareBlastCollector) collectSerialPortandScanner(ch chan<- prometheus.
func (c *vmwareBlastCollector) collectSession(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastSessionCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1137,7 +1141,7 @@ func (c *vmwareBlastCollector) collectSession(ch chan<- prometheus.Metric) (*pro
func (c *vmwareBlastCollector) collectSkypeforBusinessControl(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastSkypeforBusinessControlCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1176,7 +1180,7 @@ func (c *vmwareBlastCollector) collectSkypeforBusinessControl(ch chan<- promethe
func (c *vmwareBlastCollector) collectThinPrint(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastThinPrintCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1215,7 +1219,7 @@ func (c *vmwareBlastCollector) collectThinPrint(ch chan<- prometheus.Metric) (*p
func (c *vmwareBlastCollector) collectUsb(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastUSBCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
@@ -1254,7 +1258,7 @@ func (c *vmwareBlastCollector) collectUsb(ch chan<- prometheus.Metric) (*prometh
func (c *vmwareBlastCollector) collectWindowsMediaMmr(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters
q := queryAll(&dst)
q := queryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}

View File

@@ -2,9 +2,11 @@ package collector
import (
"bytes"
"fmt"
"reflect"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
)
func className(src interface{}) string {
@@ -16,25 +18,25 @@ func className(src interface{}) string {
return t.Name()
}
func queryAll(src interface{}) string {
func queryAll(src interface{}, logger log.Logger) string {
var b bytes.Buffer
b.WriteString("SELECT * FROM ")
b.WriteString(className(src))
log.Debugf("Generated WMI query %s", b.String())
level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
return b.String()
}
func queryAllForClass(src interface{}, class string) string {
func queryAllForClass(src interface{}, class string, logger log.Logger) string {
var b bytes.Buffer
b.WriteString("SELECT * FROM ")
b.WriteString(class)
log.Debugf("Generated WMI query %s", b.String())
level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
return b.String()
}
func queryAllWhere(src interface{}, where string) string {
func queryAllWhere(src interface{}, where string, logger log.Logger) string {
var b bytes.Buffer
b.WriteString("SELECT * FROM ")
b.WriteString(className(src))
@@ -44,11 +46,11 @@ func queryAllWhere(src interface{}, where string) string {
b.WriteString(where)
}
log.Debugf("Generated WMI query %s", b.String())
level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
return b.String()
}
func queryAllForClassWhere(src interface{}, class string, where string) string {
func queryAllForClassWhere(src interface{}, class string, where string, logger log.Logger) string {
var b bytes.Buffer
b.WriteString("SELECT * FROM ")
b.WriteString(class)
@@ -58,6 +60,6 @@ func queryAllForClassWhere(src interface{}, class string, where string) string {
b.WriteString(where)
}
log.Debugf("Generated WMI query %s", b.String())
level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
return b.String()
}

View File

@@ -2,6 +2,8 @@ package collector
import (
"testing"
"github.com/go-kit/log"
)
type fakeWmiClass struct {
@@ -11,16 +13,16 @@ type fakeWmiClass struct {
var (
mapQueryAll = func(src interface{}, class string, where string) string {
return queryAll(src)
return queryAll(src, log.NewNopLogger())
}
mapQueryAllWhere = func(src interface{}, class string, where string) string {
return queryAllWhere(src, where)
return queryAllWhere(src, where, log.NewNopLogger())
}
mapQueryAllForClass = func(src interface{}, class string, where string) string {
return queryAllForClass(src, class)
return queryAllForClass(src, class, log.NewNopLogger())
}
mapQueryAllForClassWhere = func(src interface{}, class string, where string) string {
return queryAllForClassWhere(src, class, where)
return queryAllForClassWhere(src, class, where, log.NewNopLogger())
}
)

View File

@@ -14,11 +14,12 @@
package config
import (
"io/ioutil"
"fmt"
"os"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"gopkg.in/yaml.v3"
)
@@ -32,13 +33,13 @@ type Resolver struct {
}
// NewResolver returns a Resolver structure.
func NewResolver(file string) (*Resolver, error) {
func NewResolver(file string, logger log.Logger) (*Resolver, error) {
flags := map[string]string{}
log.Infof("Loading configuration file: %v", file)
level.Info(logger).Log("msg", fmt.Sprintf("Loading configuration file: %v", file))
if _, err := os.Stat(file); err != nil {
return nil, err
}
b, err := ioutil.ReadFile(file)
b, err := os.ReadFile(file)
if err != nil {
return nil, err
}

View File

@@ -1,9 +1,10 @@
package config
import (
"gopkg.in/yaml.v3"
"reflect"
"testing"
"gopkg.in/yaml.v3"
)
// Unmarshal good configuration file and confirm data is flattened correctly

View File

@@ -6,14 +6,16 @@ package main
import (
//Its important that we do these first so that we can register with the windows service control ASAP to avoid timeouts
"github.com/prometheus-community/windows_exporter/initiate"
"github.com/prometheus-community/windows_exporter/log"
winlog "github.com/prometheus-community/windows_exporter/log"
"encoding/json"
"fmt"
stdlog "log"
"net/http"
_ "net/http/pprof"
"os"
"os/user"
"runtime"
"sort"
"strconv"
"strings"
@@ -21,9 +23,12 @@ import (
"github.com/prometheus-community/windows_exporter/collector"
"github.com/prometheus-community/windows_exporter/config"
"github.com/prometheus-community/windows_exporter/log/flag"
"github.com/yusufpapurcu/wmi"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -64,12 +69,12 @@ func expandEnabledCollectors(enabled string) []string {
return result
}
func loadCollectors(list string) (map[string]collector.Collector, error) {
func loadCollectors(list string, logger log.Logger) (map[string]collector.Collector, error) {
collectors := map[string]collector.Collector{}
enabled := expandEnabledCollectors(list)
for _, name := range enabled {
c, err := collector.Build(name)
c, err := collector.Build(name, logger)
if err != nil {
return nil, err
}
@@ -79,14 +84,15 @@ func loadCollectors(list string) (map[string]collector.Collector, error) {
return collectors, nil
}
func initWbem() {
func initWbem(logger log.Logger) {
// This initialization prevents a memory leak on WMF 5+. See
// https://github.com/prometheus-community/windows_exporter/issues/77 and
// linked issues for details.
log.Debugf("Initializing SWbemServices")
level.Debug(logger).Log("msg", "Initializing SWbemServices")
s, err := wmi.InitializeSWbemServices(wmi.DefaultClient)
if err != nil {
log.Fatal(err)
level.Error(logger).Log("err", err)
os.Exit(1)
}
wmi.DefaultClient.AllowMissingFields = true
wmi.DefaultClient.SWbemServicesClient = s
@@ -125,7 +131,10 @@ func main() {
"Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads.",
).Default("0.5").Float64()
)
log.AddFlags(app)
winlogConfig := &winlog.Config{}
flag.AddFlags(app, winlogConfig)
app.Version(version.Print("windows_exporter"))
app.HelpFlag.Short('h')
@@ -135,15 +144,23 @@ func main() {
// Load values from configuration file(s). Executable flags must first be parsed, in order
// to load the specified file(s).
kingpin.MustParse(app.Parse(os.Args[1:]))
log.Debug("Logging has Started")
logger, err := winlog.New(winlogConfig)
if err != nil {
level.Error(logger).Log("err", err)
os.Exit(1)
}
level.Debug(logger).Log("msg", "Logging has Started")
if *configFile != "" {
resolver, err := config.NewResolver(*configFile)
resolver, err := config.NewResolver(*configFile, logger)
if err != nil {
log.Fatalf("could not load config file: %v\n", err)
level.Error(logger).Log("msg", "could not load config file", "err", err)
os.Exit(1)
}
err = resolver.Bind(app, os.Args[1:])
if err != nil {
log.Fatalf("%v\n", err)
level.Error(logger).Log("err", err)
os.Exit(1)
}
// NOTE: This is temporary fix for issue #1092, calling kingpin.Parse
@@ -153,6 +170,12 @@ func main() {
// Parse flags once more to include those discovered in configuration file(s).
kingpin.MustParse(app.Parse(os.Args[1:]))
logger, err = winlog.New(winlogConfig)
if err != nil {
level.Error(logger).Log("err", err)
os.Exit(1)
}
}
if *printCollectors {
@@ -169,27 +192,29 @@ func main() {
return
}
initWbem()
initWbem(logger)
// Initialize collectors before loading
collector.RegisterCollectors()
collector.RegisterCollectors(logger)
collectors, err := loadCollectors(*enabledCollectors)
collectors, err := loadCollectors(*enabledCollectors, logger)
if err != nil {
log.Fatalf("Couldn't load collectors: %s", err)
level.Error(logger).Log("msg", "Couldn't load collectors", "err", err)
os.Exit(1)
}
u, err := user.Current()
if err != nil {
log.Fatalf(err.Error())
level.Error(logger).Log("err", err)
os.Exit(1)
}
log.Infof("Running as %v", u.Username)
level.Info(logger).Log("msg", fmt.Sprintf("Running as %v", u.Username))
if strings.Contains(u.Username, "ContainerAdministrator") || strings.Contains(u.Username, "ContainerUser") {
log.Warnf("Running as a preconfigured Windows Container user. This may mean you do not have Windows HostProcess containers configured correctly and some functionality will not work as expected.")
level.Warn(logger).Log("msg", "Running as a preconfigured Windows Container user. This may mean you do not have Windows HostProcess containers configured correctly and some functionality will not work as expected.")
}
log.Infof("Enabled collectors: %v", strings.Join(keys(collectors), ", "))
level.Info(logger).Log("msg", fmt.Sprintf("Enabled collectors: %v", strings.Join(keys(collectors), ", ")))
h := &metricsHandler{
timeoutMargin: *timeoutMargin,
@@ -207,12 +232,19 @@ func main() {
}
filteredCollectors[name] = col
}
return nil, collector.NewPrometheus(timeout, filteredCollectors)
return nil, collector.NewPrometheus(timeout, filteredCollectors, logger)
},
logger: logger,
}
http.HandleFunc(*metricsPath, withConcurrencyLimit(*maxRequests, h.ServeHTTP))
http.HandleFunc("/health", healthCheck)
http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
_, err := fmt.Fprintln(w, `{"status":"ok"}`)
if err != nil {
level.Debug(logger).Log("Failed to write to stream", "err", err)
}
})
http.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
// we can't use "version" directly as it is a package, and not an object that
// can be serialized.
@@ -250,37 +282,32 @@ func main() {
}
landingPage, err := web.NewLandingPage(landingConfig)
if err != nil {
log.Fatalf("failed to generate landing page: %v", err)
level.Error(logger).Log("msg", "failed to generate landing page", "err", err)
os.Exit(1)
}
http.Handle("/", landingPage)
}
log.Infoln("Starting windows_exporter", version.Info())
log.Infoln("Build context", version.BuildContext())
level.Info(logger).Log("msg", "Starting windows_exporter", "version", version.Info())
level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext())
level.Debug(logger).Log("msg", "Go MAXPROCS", "procs", runtime.GOMAXPROCS(0))
go func() {
server := &http.Server{}
if err := web.ListenAndServe(server, webConfig, log.NewToolkitAdapter()); err != nil {
log.Fatalf("cannot start windows_exporter: %s", err)
if err := web.ListenAndServe(server, webConfig, logger); err != nil {
level.Error(logger).Log("msg", "cannot start windows_exporter", "err", err)
os.Exit(1)
}
}()
for {
if <-initiate.StopCh {
log.Info("Shutting down windows_exporter")
level.Info(logger).Log("msg", "Shutting down windows_exporter")
break
}
}
}
func healthCheck(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
_, err := fmt.Fprintln(w, `{"status":"ok"}`)
if err != nil {
log.Debugf("Failed to write to stream: %v", err)
}
}
func keys(m map[string]collector.Collector) []string {
ret := make([]string, 0, len(m))
for key := range m {
@@ -312,6 +339,7 @@ type metricsHandler struct {
timeoutMargin float64
includeExporterMetrics bool
collectorFactory func(timeout time.Duration, requestedCollectors []string) (error, *collector.Prometheus)
logger log.Logger
}
func (mh *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -322,7 +350,7 @@ func (mh *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var err error
timeoutSeconds, err = strconv.ParseFloat(v, 64)
if err != nil {
log.Warnf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f", v, defaultTimeout)
level.Warn(mh.logger).Log("msg", fmt.Sprintf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f", v, defaultTimeout))
}
}
if timeoutSeconds == 0 {
@@ -333,7 +361,7 @@ func (mh *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
reg := prometheus.NewRegistry()
err, wc := mh.collectorFactory(time.Duration(timeoutSeconds*float64(time.Second)), r.URL.Query()["collect[]"])
if err != nil {
log.Warnln("Couldn't create filtered metrics handler: ", err)
level.Warn(mh.logger).Log("msg", "Couldn't create filtered metrics handler", "err", err)
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(fmt.Sprintf("Couldn't create filtered metrics handler: %s", err))) //nolint:errcheck
return
@@ -347,6 +375,8 @@ func (mh *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
)
}
h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
ErrorLog: stdlog.New(log.NewStdlibAdapter(level.Error(mh.logger)), "", stdlog.Lshortfile),
})
h.ServeHTTP(w, r)
}

View File

@@ -3,9 +3,10 @@ package initiate
import (
"fmt"
"os"
"github.com/prometheus-community/windows_exporter/log"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/eventlog"
)
const (
@@ -16,6 +17,8 @@ type windowsExporterService struct {
stopCh chan<- bool
}
var logger *eventlog.Log
func (s *windowsExporterService) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) {
const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown
changes <- svc.Status{State: svc.StartPending}
@@ -28,11 +31,11 @@ loop:
case svc.Interrogate:
changes <- c.CurrentStatus
case svc.Stop, svc.Shutdown:
log.Debug("Service Stop Received")
_ = logger.Info(100, "Service Stop Received")
s.stopCh <- true
break loop
default:
log.Error(fmt.Sprintf("unexpected control request #%d", c))
_ = logger.Error(102, fmt.Sprintf("unexpected control request #%d", c))
}
}
}
@@ -43,17 +46,26 @@ loop:
var StopCh = make(chan bool)
func init() {
log.Debug("Checking if We are a service")
isService, err := svc.IsWindowsService()
if err != nil {
log.Fatal(err)
logger, err := eventlog.Open("windows_exporter")
if err != nil {
os.Exit(2)
}
_ = logger.Error(102, fmt.Sprintf("Failed to detect service: %v", err))
os.Exit(1)
}
log.Debug("Attempting to start exporter service")
if isService {
logger, err := eventlog.Open("windows_exporter")
if err != nil {
os.Exit(2)
}
_ = logger.Info(100, "Attempting to start exporter service")
go func() {
err = svc.Run(serviceName, &windowsExporterService{stopCh: StopCh})
if err != nil {
log.Errorf("Failed to start service: %v", err)
_ = logger.Error(102, fmt.Sprintf("Failed to start service: %v", err))
}
}()
}

View File

@@ -52,7 +52,7 @@
<fw:RemoteAddress>[REMOTE_ADDR]</fw:RemoteAddress>
</fw:FirewallException>
</File>
<ServiceInstall Id="InstallExporterService" Name="windows_exporter" DisplayName="windows_exporter" Description="Exports Prometheus metrics about the system" ErrorControl="normal" Start="auto" Type="ownProcess" Arguments="--log.format logger:eventlog?name=windows_exporter [CollectorsFlag] [ListenFlagBoth] [ListenFlagAddr] [ListenFlagPort] [MetricsPathFlag] [TextfileDirFlag] [ExtraFlags]">
<ServiceInstall Id="InstallExporterService" Name="windows_exporter" DisplayName="windows_exporter" Description="Exports Prometheus metrics about the system" ErrorControl="normal" Start="auto" Type="ownProcess" Arguments="--log.format eventlog [CollectorsFlag] [ListenFlagBoth] [ListenFlagAddr] [ListenFlagPort] [MetricsPathFlag] [TextfileDirFlag] [ExtraFlags]">
<util:ServiceConfig FirstFailureActionType="restart" SecondFailureActionType="restart" ThirdFailureActionType="restart" RestartServiceDelayInSeconds="60" />
<ServiceDependency Id="wmiApSrv" />
</ServiceInstall>

124
log/eventlog/eventlog.go Normal file
View File

@@ -0,0 +1,124 @@
//go:build windows
// +build windows
// Package eventlog provides a Logger that writes to Windows Event Log.
package eventlog
import (
"bytes"
"fmt"
"io"
"sync"
"syscall"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"golang.org/x/sys/windows"
goeventlog "golang.org/x/sys/windows/svc/eventlog"
)
const (
// neLogOemCode is the generic error log entry for OEMs to use to
// log errors from OEM value-added services.
// See: https://github.com/microsoft/win32metadata/blob/2f3c5282ce1024a712aeccd90d3aa50bf7a49e27/generation/WinSDK/RecompiledIdlHeaders/um/LMErrlog.h#L824-L845
neLogOemCode = uint32(3299)
)
type Priority struct {
etype int
}
// NewEventLogLogger returns a new Logger that writes to the Windows Event Log.
// The body of each log record is the formatted output of the Logger returned
// by newLogger.
func NewEventLogLogger(w *goeventlog.Log, newLogger func(io.Writer) log.Logger) log.Logger {
l := &eventlogLogger{
w: w,
newLogger: newLogger,
prioritySelector: defaultPrioritySelector,
bufPool: sync.Pool{New: func() interface{} {
return &loggerBuf{}
}},
}
return l
}
type eventlogLogger struct {
w *goeventlog.Log
newLogger func(io.Writer) log.Logger
prioritySelector PrioritySelector
bufPool sync.Pool
}
func (l *eventlogLogger) Log(keyvals ...interface{}) error {
priority := l.prioritySelector(keyvals...)
lb := l.getLoggerBuf()
defer l.putLoggerBuf(lb)
if err := lb.logger.Log(keyvals...); err != nil {
return err
}
// golang.org/x/sys/windows/svc/eventlog does not provide a function that allows sending more than one string.
// See: https://github.com/golang/go/issues/59780
msg, err := syscall.UTF16PtrFromString(lb.buf.String())
if err != nil {
return fmt.Errorf("error convert string to UTF-16: %v", err)
}
ss := []*uint16{msg, nil, nil, nil, nil, nil, nil, nil, nil}
return windows.ReportEvent(l.w.Handle, uint16(priority.etype), 0, neLogOemCode, 0, 9, 0, &ss[0], nil)
}
type loggerBuf struct {
buf *bytes.Buffer
logger log.Logger
}
func (l *eventlogLogger) getLoggerBuf() *loggerBuf {
lb := l.bufPool.Get().(*loggerBuf)
if lb.buf == nil {
lb.buf = &bytes.Buffer{}
lb.logger = l.newLogger(lb.buf)
} else {
lb.buf.Reset()
}
return lb
}
func (l *eventlogLogger) putLoggerBuf(lb *loggerBuf) {
l.bufPool.Put(lb)
}
// PrioritySelector inspects the list of keyvals and selects an eventlog priority.
type PrioritySelector func(keyvals ...interface{}) Priority
// defaultPrioritySelector converts a go-kit/log level into a Windows Event Log event type.
func defaultPrioritySelector(keyvals ...interface{}) Priority {
l := len(keyvals)
eType := windows.EVENTLOG_SUCCESS
for i := 0; i < l; i += 2 {
if keyvals[i] == level.Key() {
var val interface{}
if i+1 < l {
val = keyvals[i+1]
}
if v, ok := val.(level.Value); ok {
switch v {
case level.ErrorValue():
eType = windows.EVENTLOG_ERROR_TYPE
case level.WarnValue():
eType = windows.EVENTLOG_WARNING_TYPE
case level.InfoValue():
eType = windows.EVENTLOG_INFORMATION_TYPE
}
}
}
}
return Priority{etype: eType}
}
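A short usage sketch for the new adapter (not part of the commit): the event source is opened with golang.org/x/sys and wrapped with NewEventLogLogger, so the go-kit level information is mapped to an event type by defaultPrioritySelector. The source name and messages are illustrative.

//go:build windows

package main

import (
    "github.com/go-kit/log"
    "github.com/go-kit/log/level"

    wexlog "github.com/prometheus-community/windows_exporter/log/eventlog"
    goeventlog "golang.org/x/sys/windows/svc/eventlog"
)

func main() {
    // Open (or reuse) the "windows_exporter" event source.
    w, err := goeventlog.Open("windows_exporter")
    if err != nil {
        panic(err)
    }
    defer w.Close()

    // Each Log call is rendered by the logfmt logger into a single string,
    // then reported with an event type derived from the "level" key.
    logger := wexlog.NewEventLogLogger(w, log.NewLogfmtLogger)

    _ = level.Info(logger).Log("msg", "exporter started")
    _ = level.Error(logger).Log("msg", "failed collecting ad metrics", "err", "timeout")
}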

View File

@@ -1,92 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Implementation forked from github.com/prometheus/common
//
//go:build windows
// +build windows
package log
import (
"fmt"
"os"
"golang.org/x/sys/windows/svc/eventlog"
"github.com/sirupsen/logrus"
)
func init() {
setEventlogFormatter = func(l logger, name string, debugAsInfo bool) error {
if name == "" {
return fmt.Errorf("missing name parameter")
}
fmter, err := newEventlogger(name, debugAsInfo, l.entry.Logger.Formatter)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
l.Errorf("can't connect logger to eventlog: %v", err)
return err
}
l.entry.Logger.Formatter = fmter
return nil
}
}
type eventlogger struct {
log *eventlog.Log
debugAsInfo bool
wrap logrus.Formatter
}
func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
logHandle, err := eventlog.Open(name)
if err != nil {
return nil, err
}
return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
}
func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
data, err := s.wrap.Format(e)
if err != nil {
fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
return data, err
}
switch e.Level {
case logrus.PanicLevel:
fallthrough
case logrus.FatalLevel:
fallthrough
case logrus.ErrorLevel:
err = s.log.Error(102, e.Message)
case logrus.WarnLevel:
err = s.log.Warning(101, e.Message)
case logrus.InfoLevel:
err = s.log.Info(100, e.Message)
case logrus.DebugLevel:
if s.debugAsInfo {
err = s.log.Info(100, e.Message)
}
default:
err = s.log.Info(100, e.Message)
}
if err != nil {
fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
}
return data, err
}

43
log/flag/flag.go Normal file
View File

@@ -0,0 +1,43 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flag
import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/log"
"github.com/prometheus/common/promlog"
promlogflag "github.com/prometheus/common/promlog/flag"
)
// FileFlagName is the canonical flag name to configure the log file
const FileFlagName = "log.file"
// FileFlagHelp is the help description for the log.file flag.
const FileFlagHelp = "Output file of log messages. One of [stdout, stderr, eventlog, <path to log file>]"
// AddFlags adds the flags used by this package to the Kingpin application.
// To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
func AddFlags(a *kingpin.Application, config *log.Config) {
config.Level = &promlog.AllowedLevel{}
a.Flag(promlogflag.LevelFlagName, promlogflag.LevelFlagHelp).
Default("info").SetValue(config.Level)
config.File = &log.AllowedFile{}
a.Flag(FileFlagName, FileFlagHelp).
Default("stderr").SetValue(config.File)
config.Format = &promlog.AllowedFormat{}
a.Flag(promlogflag.FormatFlagName, promlogflag.FormatFlagHelp).
Default("logfmt").SetValue(config.Format)
}
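Wiring these flags into a kingpin application and handing the parsed configuration to the logger constructor might look roughly like the sketch below; the application name and import aliases are assumptions, not taken from the exporter's main package.

//go:build windows

package main

import (
    "fmt"
    "os"

    "github.com/alecthomas/kingpin/v2"
    "github.com/go-kit/log/level"

    wexlog "github.com/prometheus-community/windows_exporter/log"
    wexlogflag "github.com/prometheus-community/windows_exporter/log/flag"
)

func main() {
    // Register --log.level, --log.file and --log.format, then parse the CLI.
    app := kingpin.New("windows_exporter", "A Prometheus exporter for Windows machines.")
    logConfig := &wexlog.Config{}
    wexlogflag.AddFlags(app, logConfig)
    kingpin.MustParse(app.Parse(os.Args[1:]))

    // Build the go-kit logger from the parsed configuration.
    logger, err := wexlog.New(logConfig)
    if err != nil {
        fmt.Fprintf(os.Stderr, "failed to create logger: %v\n", err)
        os.Exit(1)
    }
    _ = level.Info(logger).Log("msg", "logging initialized", "file", logConfig.File.String())
}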

View File

@@ -1,46 +0,0 @@
package log
import (
"github.com/go-kit/log/level"
)
// Returns an adapter implementing the go-kit/kit/log.Logger interface on our
// logrus logger
func NewToolkitAdapter() *Adapter {
return &Adapter{}
}
type Adapter struct{}
func (*Adapter) Log(keyvals ...interface{}) error {
var lvl level.Value
var msg string
for i := 0; i < len(keyvals); i += 2 {
switch keyvals[i] {
case "level":
tlvl, ok := keyvals[i+1].(level.Value)
if !ok {
Warnf("Could not cast level of type %T", keyvals[i+1])
} else {
lvl = tlvl
}
case "msg":
msg = keyvals[i+1].(string)
}
}
switch lvl {
case level.ErrorValue():
Errorln(msg)
case level.WarnValue():
Warnln(msg)
case level.InfoValue():
Infoln(msg)
case level.DebugValue():
Debugln(msg)
default:
Warnf("Unmatched log level: '%v' for message %q", lvl, msg)
}
return nil
}

View File

@@ -1,366 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package log implements logging via logrus.
// Implementation forked from github.com/prometheus/common
package log
import (
"fmt"
"io"
"io/ioutil"
"log"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/sirupsen/logrus"
)
// setSyslogFormatter is nil if the target architecture does not support syslog.
var setSyslogFormatter func(logger, string, string) error
// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
var setEventlogFormatter func(logger, string, bool) error
func setJSONFormatter() {
origLogger.Formatter = &logrus.JSONFormatter{}
}
type loggerSettings struct {
level string
format string
}
func (s *loggerSettings) apply(ctx *kingpin.ParseContext) error {
err := baseLogger.SetLevel(s.level)
if err != nil {
return err
}
err = baseLogger.SetFormat(s.format)
return err
}
// AddFlags adds the flags used by this package to the Kingpin application.
// To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
func AddFlags(a *kingpin.Application) {
s := loggerSettings{}
a.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]").
Default(origLogger.Level.String()).
StringVar(&s.level)
defaultFormat := url.URL{Scheme: "logger", Opaque: "stderr"}
a.Flag("log.format", `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`).
Default(defaultFormat.String()).
StringVar(&s.format)
a.Action(s.apply)
}
// Logger is the interface for loggers used in the Prometheus components.
type Logger interface {
Debug(...interface{})
Debugln(...interface{})
Debugf(string, ...interface{})
Info(...interface{})
Infoln(...interface{})
Infof(string, ...interface{})
Warn(...interface{})
Warnln(...interface{})
Warnf(string, ...interface{})
Error(...interface{})
Errorln(...interface{})
Errorf(string, ...interface{})
Fatal(...interface{})
Fatalln(...interface{})
Fatalf(string, ...interface{})
With(key string, value interface{}) Logger
SetFormat(string) error
SetLevel(string) error
}
type logger struct {
entry *logrus.Entry
}
func (l logger) With(key string, value interface{}) Logger {
return logger{l.entry.WithField(key, value)}
}
// Debug logs a message at level Debug on the standard logger.
func (l logger) Debug(args ...interface{}) {
l.sourced().Debug(args...)
}
// Debug logs a message at level Debug on the standard logger.
func (l logger) Debugln(args ...interface{}) {
l.sourced().Debugln(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func (l logger) Debugf(format string, args ...interface{}) {
l.sourced().Debugf(format, args...)
}
// Info logs a message at level Info on the standard logger.
func (l logger) Info(args ...interface{}) {
l.sourced().Info(args...)
}
// Info logs a message at level Info on the standard logger.
func (l logger) Infoln(args ...interface{}) {
l.sourced().Infoln(args...)
}
// Infof logs a message at level Info on the standard logger.
func (l logger) Infof(format string, args ...interface{}) {
l.sourced().Infof(format, args...)
}
// Warn logs a message at level Warn on the standard logger.
func (l logger) Warn(args ...interface{}) {
l.sourced().Warn(args...)
}
// Warn logs a message at level Warn on the standard logger.
func (l logger) Warnln(args ...interface{}) {
l.sourced().Warnln(args...)
}
// Warnf logs a message at level Warn on the standard logger.
func (l logger) Warnf(format string, args ...interface{}) {
l.sourced().Warnf(format, args...)
}
// Error logs a message at level Error on the standard logger.
func (l logger) Error(args ...interface{}) {
l.sourced().Error(args...)
}
// Error logs a message at level Error on the standard logger.
func (l logger) Errorln(args ...interface{}) {
l.sourced().Errorln(args...)
}
// Errorf logs a message at level Error on the standard logger.
func (l logger) Errorf(format string, args ...interface{}) {
l.sourced().Errorf(format, args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func (l logger) Fatal(args ...interface{}) {
l.sourced().Fatal(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func (l logger) Fatalln(args ...interface{}) {
l.sourced().Fatalln(args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func (l logger) Fatalf(format string, args ...interface{}) {
l.sourced().Fatalf(format, args...)
}
func (l logger) SetLevel(level string) error {
lvl, err := logrus.ParseLevel(level)
if err != nil {
return err
}
l.entry.Logger.Level = lvl
return nil
}
func (l logger) SetFormat(format string) error {
u, err := url.Parse(format)
if err != nil {
return err
}
if u.Scheme != "logger" {
return fmt.Errorf("invalid scheme %s", u.Scheme)
}
jsonq := u.Query().Get("json")
if jsonq == "true" {
setJSONFormatter()
}
switch u.Opaque {
case "syslog":
if setSyslogFormatter == nil {
return fmt.Errorf("system does not support syslog")
}
appname := u.Query().Get("appname")
facility := u.Query().Get("local")
return setSyslogFormatter(l, appname, facility)
case "eventlog":
if setEventlogFormatter == nil {
return fmt.Errorf("system does not support eventlog")
}
name := u.Query().Get("name")
debugAsInfo := false
debugAsInfoRaw := u.Query().Get("debugAsInfo")
if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
debugAsInfo = parsedDebugAsInfo
}
return setEventlogFormatter(l, name, debugAsInfo)
case "stdout":
l.entry.Logger.Out = os.Stdout
case "stderr":
l.entry.Logger.Out = os.Stderr
default:
return fmt.Errorf("unsupported logger %q", u.Opaque)
}
return nil
}
// sourced adds a source field to the logger that contains
// the file name and line where the logging happened.
func (l logger) sourced() *logrus.Entry {
_, file, line, ok := runtime.Caller(2)
if !ok {
file = "<???>"
line = 1
} else {
slash := strings.LastIndex(file, "/")
file = file[slash+1:]
}
return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line))
}
var origLogger = logrus.New()
var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
// Base returns the default Logger logging to
func Base() Logger {
return baseLogger
}
// NewLogger returns a new Logger logging to out.
func NewLogger(w io.Writer) Logger {
l := logrus.New()
l.Out = w
return logger{entry: logrus.NewEntry(l)}
}
// NewNopLogger returns a logger that discards all log messages.
func NewNopLogger() Logger {
l := logrus.New()
l.Out = ioutil.Discard
return logger{entry: logrus.NewEntry(l)}
}
// With adds a field to the logger.
func With(key string, value interface{}) Logger {
return baseLogger.With(key, value)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
baseLogger.sourced().Debug(args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
baseLogger.sourced().Debugln(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
baseLogger.sourced().Debugf(format, args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
baseLogger.sourced().Info(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
baseLogger.sourced().Infoln(args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
baseLogger.sourced().Infof(format, args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
baseLogger.sourced().Warn(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
baseLogger.sourced().Warnln(args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
baseLogger.sourced().Warnf(format, args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
baseLogger.sourced().Error(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
baseLogger.sourced().Errorln(args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
baseLogger.sourced().Errorf(format, args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
baseLogger.sourced().Fatal(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
baseLogger.sourced().Fatalln(args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
baseLogger.sourced().Fatalf(format, args...)
}
// AddHook adds hook to Prometheus' original logger.
func AddHook(hook logrus.Hook) {
origLogger.Hooks.Add(hook)
}
type errorLogWriter struct{}
func (errorLogWriter) Write(b []byte) (int, error) {
baseLogger.sourced().Error(string(b))
return len(b), nil
}
// NewErrorLogger returns a log.Logger that is meant to be used
// in the ErrorLog field of an http.Server to log HTTP server errors.
func NewErrorLogger() *log.Logger {
return log.New(&errorLogWriter{}, "", 0)
}

90
log/logger.go Normal file
View File

@@ -0,0 +1,90 @@
package log
import (
"errors"
"fmt"
"io"
"os"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/log/eventlog"
"github.com/prometheus/common/promlog"
goeventlog "golang.org/x/sys/windows/svc/eventlog"
)
// AllowedFile is a settable identifier for the output destination the logger writes to.
type AllowedFile struct {
s string
w io.Writer
}
func (f *AllowedFile) String() string {
return f.s
}
// Set updates the value of the allowed file.
func (f *AllowedFile) Set(s string) error {
switch s {
case "stdout":
f.w = os.Stdout
case "stderr":
f.w = os.Stderr
case "eventlog":
f.w = nil
default:
file, err := os.OpenFile(s, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
return err
}
f.w = file
}
return nil
}
// Config is a struct containing configurable settings for the logger
type Config struct {
promlog.Config
File *AllowedFile
}
func New(config *Config) (log.Logger, error) {
if config.File == nil {
return nil, errors.New("log file undefined")
}
if config.Format == nil {
return nil, errors.New("log format undefined")
}
var (
l log.Logger
loggerFunc func(io.Writer) log.Logger
)
switch config.Format.String() {
case "json":
loggerFunc = log.NewJSONLogger
case "logfmt":
loggerFunc = log.NewLogfmtLogger
default:
return nil, fmt.Errorf("unsupported log.format %q", config.Format.String())
}
if config.File.s == "eventlog" {
w, err := goeventlog.Open("windows_exporter")
if err != nil {
return nil, err
}
l = eventlog.NewEventLogLogger(w, loggerFunc)
} else {
l = loggerFunc(log.NewSyncWriter(config.File.w))
}
promlogConfig := promlog.Config{
Format: config.Format,
Level: config.Level,
}
return promlog.NewWithLogger(l, &promlogConfig), nil
}
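For completeness, the Config can also be filled in without kingpin; the sketch below sets the same three fields the flags would populate and then calls New. The chosen values are illustrative, and the sketch is not part of the commit.

//go:build windows

package main

import (
    "os"

    "github.com/go-kit/log/level"
    "github.com/prometheus/common/promlog"

    wexlog "github.com/prometheus-community/windows_exporter/log"
)

func main() {
    cfg := &wexlog.Config{File: &wexlog.AllowedFile{}}
    cfg.Level = &promlog.AllowedLevel{}
    cfg.Format = &promlog.AllowedFormat{}

    // Mirror --log.level=debug --log.format=json --log.file=stderr;
    // "eventlog" or a file path would exercise the other branches of New.
    _ = cfg.Level.Set("debug")
    _ = cfg.Format.Set("json")
    if err := cfg.File.Set("stderr"); err != nil {
        os.Exit(1)
    }

    logger, err := wexlog.New(cfg)
    if err != nil {
        os.Exit(1)
    }
    _ = level.Debug(logger).Log("msg", "logger constructed without kingpin")
}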