feat: Tolerate collector failures (#1769)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>

@@ -11,7 +11,6 @@ linters:
-    - exportloopref
     - fatcontext
     - funlen
     - gochecknoglobals
     - gocognit
     - goconst
     - gocyclo
@@ -88,7 +87,3 @@ issues:
     - text: "don't use ALL_CAPS in Go names; use CamelCase"
       linters:
         - revive
-    - path: internal/perfdata/v1/
-      linters:
-        - godox
-        - stylecheck

13 .run/all.run.xml Normal file
@@ -0,0 +1,13 @@
+<component name="ProjectRunConfigurationManager">
+  <configuration default="false" name="all" type="GoApplicationRunConfiguration" factoryName="Go Application" folderName="run">
+    <module name="windows_exporter" />
+    <working_directory value="$PROJECT_DIR$" />
+    <parameters value="--web.listen-address=127.0.0.1:9182 --log.level=debug --collectors.enabled=ad,adcs,adfs,cache,container,cpu,cpu_info,cs,dfsr,dhcp,diskdrive,dns,exchange,filetime,fsrmquota,hyperv,iis,license,logical_disk,logon,memory,mscluster,msmq,mssql,net,netframework,nps,os,pagefile,perfdata,physical_disk,printer,process,remote_fx,scheduled_task,service,smb,smbclient,smtp,system,tcp,terminal_services,textfile,thermalzone,time,udp,update,vmware" />
+    <sudo value="true" />
+    <kind value="PACKAGE" />
+    <package value="github.com/prometheus-community/windows_exporter/cmd/windows_exporter" />
+    <directory value="$PROJECT_DIR$" />
+    <filePath value="$PROJECT_DIR$/exporter.go" />
+    <method v="2" />
+  </configuration>
+</component>
@@ -42,6 +42,7 @@ import (
 	"github.com/prometheus-community/windows_exporter/internal/httphandler"
 	"github.com/prometheus-community/windows_exporter/internal/log"
 	"github.com/prometheus-community/windows_exporter/internal/log/flag"
+	"github.com/prometheus-community/windows_exporter/internal/utils"
 	"github.com/prometheus-community/windows_exporter/pkg/collector"
 	"github.com/prometheus/common/version"
 	"github.com/prometheus/exporter-toolkit/web"
@@ -64,6 +65,8 @@ func main() {
 }
 
 func run() int {
+	startTime := time.Now()
+
 	app := kingpin.New("windows_exporter", "A metrics collector for Windows.")
 
 	var (
@@ -191,7 +194,7 @@ func run() int {
 
 	enabledCollectorList := expandEnabledCollectors(*enabledCollectors)
 	if err := collectors.Enable(enabledCollectorList); err != nil {
-		logger.Error("Couldn't enable collectors",
+		logger.Error("couldn't enable collectors",
 			slog.Any("err", err),
 		)
 
@@ -200,11 +203,11 @@ func run() int {
 
 	// Initialize collectors before loading
 	if err = collectors.Build(logger); err != nil {
-		logger.Error("Couldn't load collectors",
-			slog.Any("err", err),
-		)
-
-		return 1
+		for _, err := range utils.SplitError(err) {
+			logger.Warn("couldn't initialize collector",
+				slog.Any("err", err),
+			)
+		}
 	}
 
 	logCurrentUser(logger)
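The loop above relies on the collector framework joining individual build failures into one error and logging each as a warning instead of exiting. A minimal sketch of the splitting side, assuming utils.SplitError unwraps errors produced by errors.Join; the names here are illustrative, not the exporter's exact API:

```go
package main

import (
	"errors"
	"fmt"
)

// splitError mirrors what a helper like utils.SplitError is assumed to do:
// errors produced by errors.Join expose an `Unwrap() []error` method, which
// lets the caller recover the individual failures for per-collector logging.
func splitError(err error) []error {
	if joined, ok := err.(interface{ Unwrap() []error }); ok {
		return joined.Unwrap()
	}

	return []error{err}
}

func main() {
	err := errors.Join(
		errors.New("hyperv: datastore requires Windows Server 2022 or newer"),
		errors.New("iis: failed to build Web Service collector"),
	)

	// Each failure is logged as a warning; the exporter keeps running.
	for _, e := range splitError(err) {
		fmt.Println("warn: couldn't initialize collector:", e)
	}
}
```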
@@ -228,7 +231,7 @@ func run() int {
 		mux.HandleFunc("GET /debug/pprof/trace", pprof.Trace)
 	}
 
-	logger.Info("Starting windows_exporter",
+	logger.Info(fmt.Sprintf("starting windows_exporter in %s", time.Since(startTime)),
 		slog.String("version", version.Version),
 		slog.String("branch", version.Branch),
 		slog.String("revision", version.GetRevision()),
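The hunk above folds the measured startup duration (taken from the startTime introduced earlier in run()) into the log message itself. A minimal standalone sketch of the same pattern, with illustrative values:

```go
package main

import (
	"fmt"
	"log/slog"
	"time"
)

func main() {
	startTime := time.Now()

	// ... flag parsing and collector initialization would happen here ...

	// time.Since(startTime) yields a time.Duration whose String() form
	// ("12.3ms", "1.2s") is embedded directly into the message.
	slog.Info(fmt.Sprintf("starting windows_exporter in %s", time.Since(startTime)),
		slog.String("version", "dev"),
	)
}
```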
@@ -16,7 +16,6 @@
 package ad
 
 import (
-	"errors"
 	"fmt"
 	"log/slog"
 
@@ -31,6 +30,7 @@ const Name = "ad"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 type Collector struct {
@@ -671,7 +671,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 	data, ok := perfData["NTDS"]
 
 	if !ok {
-		return errors.New("perflib query for DirectoryServices (AD) returned empty result set")
+		return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", types.ErrNoData)
 	}
 
 	ch <- prometheus.MustNewConstMetric(
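The new error style wires every empty-result case to a shared sentinel instead of a one-off errors.New string. A minimal sketch of why that matters, with a local ErrNoData standing in for the exporter's types.ErrNoData:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrNoData stands in for the exporter's types.ErrNoData sentinel.
var ErrNoData = errors.New("no data")

// collect mimics the new style: wrap the sentinel with context via %w.
func collect(perfData map[string]int) error {
	if _, ok := perfData["NTDS"]; !ok {
		return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", ErrNoData)
	}

	return nil
}

func main() {
	err := collect(map[string]int{})

	// Because the sentinel is wrapped with %w, callers can distinguish
	// "counter exists but returned no instances" from real failures.
	if errors.Is(err, ErrNoData) {
		fmt.Println("tolerated:", err)
	}
}
```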
@@ -16,7 +16,6 @@
 package adcs
 
 import (
-	"errors"
 	"fmt"
 	"log/slog"
 
@@ -32,6 +31,7 @@ const Name = "adcs"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 type Collector struct {
@@ -191,7 +191,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for Certification Authority (ADCS) returned empty result set")
+		return fmt.Errorf("failed to collect Certification Authority (ADCS) metrics: %w", types.ErrNoData)
 	}
 
 	for name, data := range perfData {
@@ -16,7 +16,6 @@
 package adfs
 
 import (
-	"errors"
 	"fmt"
 	"log/slog"
 	"maps"
@@ -34,6 +33,7 @@ const Name = "adfs"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 type Collector struct {
@@ -160,7 +160,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		avgConfigDBQueryTime,
 		federationMetadataRequests,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create AD FS collector: %w", err)
 	}
 
@@ -435,13 +435,13 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 	instanceKey := slices.Collect(maps.Keys(data))
 
 	if len(instanceKey) == 0 {
-		return errors.New("perflib query for ADFS returned empty result set")
+		return fmt.Errorf("failed to collect ADFS metrics: %w", types.ErrNoData)
 	}
 
 	adfsData, ok := data[instanceKey[0]]
 
 	if !ok {
-		return errors.New("perflib query for ADFS returned empty result set")
+		return fmt.Errorf("failed to collect ADFS metrics: %w", types.ErrNoData)
 	}
 
 	ch <- prometheus.MustNewConstMetric(
4 internal/collector/cache/cache.go vendored
@@ -16,7 +16,6 @@
 package cache
 
 import (
-	"errors"
 	"fmt"
 	"log/slog"
 
@@ -31,6 +30,7 @@ const Name = "cache"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 // A Collector is a Prometheus Collector for Perflib Cache metrics.
@@ -322,7 +322,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 	cacheData, ok := data[perfdata.InstanceEmpty]
 
 	if !ok {
-		return errors.New("perflib query for Cache returned empty result set")
+		return fmt.Errorf("failed to collect Cache metrics: %w", types.ErrNoData)
 	}
 
 	ch <- prometheus.MustNewConstMetric(
@@ -33,6 +33,7 @@ const Name = "container"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 // A Collector is a Prometheus Collector for containers metrics.
@@ -32,6 +32,7 @@ const Name = "cpu"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 type Collector struct {
@@ -32,6 +32,7 @@ const Name = "cpu_info"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 // A Collector is a Prometheus Collector for a few WMI metrics in Win32_Processor.
@@ -147,6 +148,11 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
 		nil,
 	)
 
+	var dst []miProcessor
+	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
+		return fmt.Errorf("WMI query failed: %w", err)
+	}
+
 	return nil
 }
 
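The added probe runs the WMI query once while the collector is being built, so a broken provider surfaces as a tolerated build failure instead of an error on every scrape. A minimal sketch of the pattern, with a hypothetical queryFn standing in for the mi session plumbing:

```go
package main

import (
	"errors"
	"fmt"
)

// queryFn stands in for c.miSession.Query; the mi plumbing is elided.
type queryFn func(dst any) error

// build probes the WMI class once up front. If the provider is broken,
// Build fails here and the collector framework can tolerate the failure.
func build(query queryFn) error {
	var dst []struct{ Name string }
	if err := query(&dst); err != nil {
		return fmt.Errorf("WMI query failed: %w", err)
	}

	return nil
}

func main() {
	broken := func(any) error { return errors.New("provider load failure") }
	fmt.Println(build(broken)) // WMI query failed: provider load failure
}
```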
@@ -29,6 +29,7 @@ const Name = "cs"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 // A Collector is a Prometheus Collector for WMI metrics.
@@ -53,5 +53,5 @@ const (
 	databaseLookupsTotal           = "Database Lookups"
 	usnJournalRecordsReadTotal     = "USN Journal Records Read"
 	usnJournalRecordsAcceptedTotal = "USN Journal Records Accepted"
-	usnJournalUnreadPercentage     = "USN Journal Records Unread Percentage"
+	usnJournalUnreadPercentage     = "USN Journal Unread Percentage"
 )
@@ -35,6 +35,7 @@ type Config struct {
 	CollectorsEnabled []string `yaml:"collectors_enabled"`
 }
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{
 	CollectorsEnabled: []string{"connection", "folder", "volume"},
 }
@@ -542,7 +543,7 @@ func (c *Collector) collectPDHConnection(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for DFS Replication Connections returned empty result set")
+		return fmt.Errorf("failed to collect DFS Replication Connections metrics: %w", types.ErrNoData)
 	}
 
 	for name, connection := range perfData {
@@ -620,7 +621,7 @@ func (c *Collector) collectPDHFolder(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for DFS Replicated Folders returned empty result set")
+		return fmt.Errorf("failed to collect DFS Replicated Folders metrics: %w", types.ErrNoData)
 	}
 
 	for name, folder := range perfData {
@@ -824,7 +825,7 @@ func (c *Collector) collectPDHVolume(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for DFS Replication Volumes returned empty result set")
+		return fmt.Errorf("failed to collect DFS Replication Volumes metrics: %w", types.ErrNoData)
 	}
 
 	for name, volume := range perfData {
@@ -16,7 +16,6 @@
 package dhcp
 
 import (
-	"errors"
 	"fmt"
 	"log/slog"
 
@@ -31,6 +30,7 @@ const Name = "dhcp"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 // A Collector is a Prometheus Collector perflib DHCP metrics.
@@ -288,7 +288,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 
 	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
-		return errors.New("perflib query for DHCP Server returned empty result set")
+		return fmt.Errorf("failed to collect DHCP Server metrics: %w", types.ErrNoData)
 	}
 
 	ch <- prometheus.MustNewConstMetric(
@@ -31,6 +31,7 @@ const Name = "diskdrive"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 // A Collector is a Prometheus Collector for a few WMI metrics in Win32_DiskDrive.
@@ -119,6 +120,11 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
 		nil,
 	)
 
+	var dst []diskDrive
+	if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
+		return fmt.Errorf("WMI query failed: %w", err)
+	}
+
 	return nil
 }
 
@@ -133,6 +139,7 @@ type diskDrive struct {
 	Availability uint16 `mi:"Availability"`
 }
 
+//nolint:gochecknoglobals
 var (
 	allDiskStatus = []string{
 		"OK",
@@ -16,7 +16,6 @@
 package dns
 
 import (
-	"errors"
 	"fmt"
 	"log/slog"
 
@@ -31,6 +30,7 @@ const Name = "dns"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.
@@ -284,7 +284,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 
 	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
-		return errors.New("perflib query for DNS returned empty result set")
+		return fmt.Errorf("failed to collect DNS metrics: %w", types.ErrNoData)
 	}
 
 	ch <- prometheus.MustNewConstMetric(
@@ -47,6 +47,7 @@ type Config struct {
 	CollectorsEnabled []string `yaml:"collectors_enabled"`
 }
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{
 	CollectorsEnabled: []string{
 		adAccessProcesses,
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -73,7 +72,7 @@ func (c *Collector) collectActiveSync(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchange ActiveSync returned empty result set")
+		return fmt.Errorf("failed to collect MSExchange ActiveSync metrics: %w", types.ErrNoData)
 	}
 
 	for _, data := range perfData {
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -89,7 +88,7 @@ func (c *Collector) collectADAccessProcesses(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchange ADAccess Processes returned empty result set")
+		return fmt.Errorf("failed to collect MSExchange ADAccess Processes metrics: %w", types.ErrNoData)
 	}
 
 	labelUseCount := make(map[string]int)
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -53,7 +52,7 @@ func (c *Collector) collectAutoDiscover(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchange Autodiscover returned empty result set")
+		return fmt.Errorf("failed to collect MSExchange Autodiscover metrics: %w", types.ErrNoData)
 	}
 
 	for _, data := range perfData {
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -53,7 +52,7 @@ func (c *Collector) collectAvailabilityService(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchange Availability Service returned empty result set")
+		return fmt.Errorf("failed to collect MSExchange Availability Service metrics: %w", types.ErrNoData)
 	}
 
 	for _, data := range perfData {
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -97,7 +96,7 @@ func (c *Collector) collectHTTPProxy(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchange HttpProxy Service returned empty result set")
+		return fmt.Errorf("failed to collect MSExchange HttpProxy Service metrics: %w", types.ErrNoData)
 	}
 
 	for name, data := range perfData {
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -57,7 +56,7 @@ func (c *Collector) collectMapiHttpEmsmdb(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchange MapiHttp Emsmdb returned empty result set")
+		return fmt.Errorf("failed to collect MSExchange MapiHttp Emsmdb metrics: %w", types.ErrNoData)
 	}
 
 	for _, data := range perfData {
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -65,7 +64,7 @@ func (c *Collector) collectOWA(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchange OWA returned empty result set")
+		return fmt.Errorf("failed to collect MSExchange OWA metrics: %w", types.ErrNoData)
 	}
 
 	for _, data := range perfData {
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -97,7 +96,7 @@ func (c *Collector) collectRPC(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchange RpcClientAccess returned empty result set")
+		return fmt.Errorf("failed to collect MSExchange RpcClientAccess metrics: %w", types.ErrNoData)
 	}
 
 	for _, data := range perfData {
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -201,7 +200,7 @@ func (c *Collector) collectTransportQueues(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchangeTransport Queues returned empty result set")
+		return fmt.Errorf("failed to collect MSExchangeTransport Queues metrics: %w", types.ErrNoData)
 	}
 
 	for name, data := range perfData {
@@ -16,7 +16,6 @@
 package exchange
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -89,7 +88,7 @@ func (c *Collector) collectWorkloadManagementWorkloads(ch chan<- prometheus.Metric) error {
 	}
 
 	if len(perfData) == 0 {
-		return errors.New("perflib query for MSExchange WorkloadManagement Workloads returned empty result set")
+		return fmt.Errorf("failed to collect MSExchange WorkloadManagement Workloads metrics: %w", types.ErrNoData)
 	}
 
 	for name, data := range perfData {
@@ -36,6 +36,7 @@ type Config struct {
 	FilePatterns []string
 }
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{
 	FilePatterns: []string{},
 }
@@ -31,6 +31,7 @@ const Name = "fsrmquota"
 
 type Config struct{}
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{}
 
 type Collector struct {
@@ -142,30 +143,32 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
 		nil,
 	)
 
+	var dst []msftFSRMQuota
+	if err := c.miSession.Query(&dst, mi.NamespaceRootWindowsFSRM, c.miQuery); err != nil {
+		return fmt.Errorf("WMI query failed: %w", err)
+	}
+
 	return nil
 }
 
 // MSFT_FSRMQuota docs:
 // https://docs.microsoft.com/en-us/previous-versions/windows/desktop/fsrm/msft-fsrmquota
-type MSFT_FSRMQuota struct {
-	Name string `mi:"Name"`
-
-	Path            string `mi:"Path"`
-	PeakUsage       uint64 `mi:"PeakUsage"`
-	Size            uint64 `mi:"Size"`
-	Usage           uint64 `mi:"Usage"`
-	Description     string `mi:"Description"`
-	Template        string `mi:"Template"`
-	// Threshold string `mi:"Threshold"`
-	Disabled        bool `mi:"Disabled"`
-	MatchesTemplate bool `mi:"MatchesTemplate"`
-	SoftLimit       bool `mi:"SoftLimit"`
+type msftFSRMQuota struct {
+	Path            string `mi:"Path"`
+	PeakUsage       uint64 `mi:"PeakUsage"`
+	Size            uint64 `mi:"Size"`
+	Usage           uint64 `mi:"Usage"`
+	Description     string `mi:"Description"`
+	Template        string `mi:"Template"`
+	Disabled        bool   `mi:"Disabled"`
+	MatchesTemplate bool   `mi:"MatchesTemplate"`
+	SoftLimit       bool   `mi:"SoftLimit"`
 }
 
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-	var dst []MSFT_FSRMQuota
+	var dst []msftFSRMQuota
 	if err := c.miSession.Query(&dst, mi.NamespaceRootWindowsFSRM, c.miQuery); err != nil {
 		return fmt.Errorf("WMI query failed: %w", err)
 	}
 
@@ -25,7 +25,9 @@ import (
 
 	"github.com/alecthomas/kingpin/v2"
 	"github.com/prometheus-community/windows_exporter/internal/mi"
+	"github.com/prometheus-community/windows_exporter/internal/types"
 	"github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/sys/windows"
 )
 
 const (
@@ -52,6 +54,7 @@ type Config struct {
 	CollectorsEnabled []string `yaml:"collectors_enabled"`
 }
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{
 	CollectorsEnabled: []string{
 		subCollectorDataStore,
@@ -154,15 +157,19 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		return nil
 	}
 
+	version := windows.RtlGetVersion()
+
 	subCollectors := map[string]struct {
-		build   func() error
-		collect func(ch chan<- prometheus.Metric) error
-		close   func()
+		build          func() error
+		collect        func(ch chan<- prometheus.Metric) error
+		close          func()
+		minBuildNumber uint32
 	}{
 		subCollectorDataStore: {
-			build:   c.buildDataStore,
-			collect: c.collectDataStore,
-			close:   c.perfDataCollectorDataStore.Close,
+			build:          c.buildDataStore,
+			collect:        c.collectDataStore,
+			close:          c.perfDataCollectorDataStore.Close,
+			minBuildNumber: types.BuildNumberWindowsServer2022,
 		},
 		subCollectorDynamicMemoryBalancer: {
 			build: c.buildDynamicMemoryBalancer,
@@ -239,20 +246,30 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	// Result must order, to prevent test failures.
 	sort.Strings(c.config.CollectorsEnabled)
 
+	errs := make([]error, 0, len(c.config.CollectorsEnabled))
+
 	for _, name := range c.config.CollectorsEnabled {
 		if _, ok := subCollectors[name]; !ok {
 			return fmt.Errorf("unknown collector: %s", name)
 		}
 
+		if version.BuildNumber < subCollectors[name].minBuildNumber {
+			errs = append(errs, fmt.Errorf("collector %s requires Windows Server 2022 or newer", name))
+
+			continue
+		}
+
 		if err := subCollectors[name].build(); err != nil {
-			return fmt.Errorf("failed to build %s collector: %w", name, err)
+			errs = append(errs, fmt.Errorf("failed to build %s collector: %w", name, err))
+
+			continue
 		}
 
 		c.collectorFns = append(c.collectorFns, subCollectors[name].collect)
 		c.closeFns = append(c.closeFns, subCollectors[name].close)
 	}
 
-	return nil
+	return errors.Join(errs...)
 }
 
 // Collect sends the metric values for each metric
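A condensed sketch of the tolerant Build loop above: sub-collectors are gated on the OS build number, and each failure is recorded rather than aborting the whole collector. The sub-collector names and the 20348 build number below are illustrative, not taken from the exporter:

```go
package main

import (
	"errors"
	"fmt"
)

type subCollector struct {
	build          func() error
	minBuildNumber uint32
}

// buildAll mirrors the tolerant loop: unsupported or failing sub-collectors
// contribute an error but do not stop the remaining ones from building.
func buildAll(osBuild uint32, subs map[string]subCollector) error {
	errs := make([]error, 0, len(subs))

	for name, sub := range subs {
		if osBuild < sub.minBuildNumber {
			errs = append(errs, fmt.Errorf("collector %s requires Windows Server 2022 or newer", name))

			continue
		}

		if err := sub.build(); err != nil {
			errs = append(errs, fmt.Errorf("failed to build %s collector: %w", name, err))
		}
	}

	// errors.Join returns nil when errs is empty, so the happy path
	// still reports success.
	return errors.Join(errs...)
}

func main() {
	subs := map[string]subCollector{
		"datastore": {build: func() error { return nil }, minBuildNumber: 20348},
		"vid":       {build: func() error { return errors.New("counter missing") }},
	}
	fmt.Println(buildAll(19042, subs))
}
```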
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -177,7 +176,7 @@ func (c *Collector) buildDataStore() error {
 		dataStoreSetOperationLatencyMicro,
 		dataStoreSetOperationCount,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V DataStore collector: %w", err)
 	}
 
@@ -463,7 +462,7 @@ func (c *Collector) buildDataStore() error {
 
 func (c *Collector) collectDataStore(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorDataStore.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V DataStore metrics: %w", err)
 	}
 
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -69,7 +68,7 @@ func (c *Collector) buildDynamicMemoryVM() error {
 		vmMemoryRemovedMemory,
 		vmMemoryGuestAvailableMemory,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V Dynamic Memory VM collector: %w", err)
 	}
 
@@ -139,7 +138,7 @@ func (c *Collector) buildDynamicMemoryVM() error {
 
 func (c *Collector) collectDynamicMemoryVM(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorDynamicMemoryVM.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V Dynamic Memory VM metrics: %w", err)
 	}
 
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 	"strings"
 
@@ -58,7 +57,7 @@ func (c *Collector) buildHypervisorVirtualProcessor() error {
 		hypervisorVirtualProcessorRemoteRunTimePercent,
 		hypervisorVirtualProcessorCPUWaitTimePerDispatch,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V Hypervisor Virtual Processor collector: %w", err)
 	}
 
@@ -86,7 +85,7 @@ func (c *Collector) buildHypervisorVirtualProcessor() error {
 
 func (c *Collector) collectHypervisorVirtualProcessor(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorHypervisorVirtualProcessor.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V Hypervisor Virtual Processor metrics: %w", err)
 	}
 
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -56,7 +55,7 @@ func (c *Collector) buildLegacyNetworkAdapter() error {
 		legacyNetworkAdapterFramesReceived,
 		legacyNetworkAdapterFramesSent,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V Legacy Network Adapter collector: %w", err)
 	}
 
@@ -102,7 +101,7 @@ func (c *Collector) buildLegacyNetworkAdapter() error {
 
 func (c *Collector) collectLegacyNetworkAdapter(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorLegacyNetworkAdapter.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V Legacy Network Adapter metrics: %w", err)
 	}
 
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -46,7 +45,7 @@ func (c *Collector) buildVirtualMachineVidPartition() error {
 		preferredNUMANodeIndex,
 		remotePhysicalPages,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V VM Vid Partition collector: %w", err)
 	}
 
@@ -74,7 +73,7 @@ func (c *Collector) buildVirtualMachineVidPartition() error {
 
 func (c *Collector) collectVirtualMachineVidPartition(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorVirtualMachineVidPartition.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V VM Vid Partition metrics: %w", err)
 	}
 
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -56,7 +55,7 @@ func (c *Collector) buildVirtualNetworkAdapter() error {
 		virtualNetworkAdapterPacketsReceived,
 		virtualNetworkAdapterPacketsSent,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter collector: %w", err)
 	}
 
@@ -102,7 +101,7 @@ func (c *Collector) buildVirtualNetworkAdapter() error {
 
 func (c *Collector) collectVirtualNetworkAdapter(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorVirtualNetworkAdapter.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V Virtual Network Adapter metrics: %w", err)
 	}
 
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -215,7 +214,7 @@ func (c *Collector) buildVirtualNetworkAdapterDropReasons() error {
 		virtualNetworkAdapterDropReasonsOutgoingUnknown,
 		virtualNetworkAdapterDropReasonsIncomingUnknown,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter Drop Reasons collector: %w", err)
 	}
 
@@ -231,7 +230,7 @@ func (c *Collector) buildVirtualNetworkAdapterDropReasons() error {
 
 func (c *Collector) collectVirtualNetworkAdapterDropReasons(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorVirtualNetworkAdapterDropReasons.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V Virtual Network Adapter Drop Reasons metrics: %w", err)
 	}
 
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -89,7 +88,7 @@ func (c *Collector) buildVirtualSMB() error {
 		virtualSMBSentBytes,
 		virtualSMBReceivedBytes,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V Virtual SMB collector: %w", err)
 	}
 
@@ -201,7 +200,7 @@ func (c *Collector) buildVirtualSMB() error {
 
 func (c *Collector) collectVirtualSMB(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorVirtualSMB.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V Virtual SMB metrics: %w", err)
 	}
 
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -74,7 +73,7 @@ func (c *Collector) buildVirtualStorageDevice() error {
 		virtualStorageDeviceLowerLatency,
 		virtualStorageDeviceIOQuotaReplenishmentRate,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V Virtual Storage Device collector: %w", err)
 	}
 
@@ -156,7 +155,7 @@ func (c *Collector) buildVirtualStorageDevice() error {
 
 func (c *Collector) collectVirtualStorageDevice(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorVirtualStorageDevice.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V Virtual Storage Device metrics: %w", err)
 	}
 
@@ -16,7 +16,6 @@
 package hyperv
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -100,7 +99,7 @@ func (c *Collector) buildVirtualSwitch() error {
 		virtualSwitchPacketsSent,
 		virtualSwitchPurgedMacAddresses,
 	})
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to create Hyper-V Virtual Switch collector: %w", err)
 	}
 
@@ -236,7 +235,7 @@ func (c *Collector) buildVirtualSwitch() error {
 
 func (c *Collector) collectVirtualSwitch(ch chan<- prometheus.Metric) error {
 	data, err := c.perfDataCollectorVirtualSwitch.Collect()
-	if err != nil && !errors.Is(err, perfdata.ErrNoData) {
+	if err != nil {
 		return fmt.Errorf("failed to collect Hyper-V Virtual Switch metrics: %w", err)
 	}
 
@@ -41,6 +41,7 @@ type Config struct {
 	AppExclude *regexp.Regexp `yaml:"app_exclude"`
 }
 
+//nolint:gochecknoglobals
 var ConfigDefaults = Config{
 	SiteInclude: types.RegExpAny,
 	SiteExclude: types.RegExpEmpty,
@@ -150,8 +151,8 @@ func (c *Collector) GetName() string {
 func (c *Collector) Close() error {
 	c.perfDataCollectorWebService.Close()
 	c.perfDataCollectorAppPoolWAS.Close()
-	c.perfDataCollectorW3SVCW3WP.Close()
-	c.perfDataCollectorWebServiceCache.Close()
+	c.w3SVCW3WPPerfDataCollector.Close()
+	c.serviceCachePerfDataCollector.Close()
 
 	return nil
 }
@@ -168,23 +169,25 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 		prometheus.Labels{"version": fmt.Sprintf("%d.%d", c.iisVersion.major, c.iisVersion.minor)},
 	)
 
+	errs := make([]error, 0, 4)
+
 	if err := c.buildWebService(); err != nil {
-		return fmt.Errorf("failed to build Web Service collector: %w", err)
+		errs = append(errs, fmt.Errorf("failed to build Web Service collector: %w", err))
 	}
 
 	if err := c.buildAppPoolWAS(); err != nil {
-		return fmt.Errorf("failed to build APP_POOL_WAS collector: %w", err)
+		errs = append(errs, fmt.Errorf("failed to build APP_POOL_WAS collector: %w", err))
 	}
 
 	if err := c.buildW3SVCW3WP(); err != nil {
-		return fmt.Errorf("failed to build W3SVC_W3WP collector: %w", err)
+		errs = append(errs, fmt.Errorf("failed to build W3SVC_W3WP collector: %w", err))
 	}
 
 	if err := c.buildWebServiceCache(); err != nil {
-		return fmt.Errorf("failed to build Web Service Cache collector: %w", err)
+		errs = append(errs, fmt.Errorf("failed to build Web Service Cache collector: %w", err))
 	}
 
-	return nil
+	return errors.Join(errs...)
 }
 
 type simpleVersion struct {
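The success path stays unchanged here because errors.Join returns nil when it receives no non-nil errors, so `return errors.Join(errs...)` over an empty slice still reports success. A quick demonstration of that property:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	fmt.Println(errors.Join() == nil)         // true: nothing collected
	fmt.Println(errors.Join(nil, nil) == nil) // true: nil entries are dropped

	// Any non-nil error produces a joined, non-nil result.
	fmt.Println(errors.Join(errors.New("APP_POOL_WAS failed")))
}
```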
@@ -195,7 +198,7 @@ type simpleVersion struct {
 func (c *Collector) getIISVersion(logger *slog.Logger) simpleVersion {
 	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\InetStp\`, registry.QUERY_VALUE)
 	if err != nil {
-		logger.Warn("Couldn't open registry to determine IIS version",
+		logger.Warn("couldn't open registry to determine IIS version",
 			slog.Any("err", err),
 		)
 
@@ -273,7 +276,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 // discarded, and "Site_B#2" would be kept and presented as "Site_B" in the
 // Collector metrics.
 // [ "Site_A", "Site_B", "Site_C", "Site_B#2" ].
-func deduplicateIISNames(counterValues map[string]map[string]perfdata.CounterValues) {
+func deduplicateIISNames(counterValues map[string]map[string]perfdata.CounterValue) {
 	services := slices.Collect(maps.Keys(counterValues))
 
 	// Ensure IIS entry with the highest suffix occurs last
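The comment above describes the dedup rule: when IIS reloads a site it appends a "#N" suffix, and only the highest-suffixed entry should survive, presented under the base name. A minimal sketch over plain strings rather than perfdata.CounterValue maps; it uses a lexicographic sort, which is sufficient for single-digit suffixes, while the real implementation may order the suffixes differently:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// deduplicateNames keeps only the highest "#N" reload suffix per IIS site
// and strips the suffix, mirroring the behaviour described for
// deduplicateIISNames.
func deduplicateNames(values map[string]float64) {
	names := make([]string, 0, len(values))
	for name := range values {
		names = append(names, name)
	}

	// Ensure the entry with the highest suffix occurs last, e.g.
	// [ "Site_A", "Site_B", "Site_B#2" ].
	sort.Strings(names)

	for _, name := range names {
		if base, _, ok := strings.Cut(name, "#"); ok {
			// Overwrite the stale entry and drop the suffixed one.
			values[base] = values[name]
			delete(values, name)
		}
	}
}

func main() {
	values := map[string]float64{"Site_A": 1, "Site_B": 2, "Site_B#2": 5}
	deduplicateNames(values)
	fmt.Println(values) // map[Site_A:1 Site_B:5]
}
```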
@@ -57,6 +57,7 @@ const (
 	TotalWorkerProcessStartupFailures = "Total Worker Process Startup Failures"
 )
 
+//nolint:gochecknoglobals
 var applicationStates = map[uint32]string{
 	1: "Uninitialized",
 	2: "Initialized",
@@ -26,399 +26,385 @@ import (
|
||||
)
|
||||
|
||||
type collectorW3SVCW3WP struct {
|
||||
perfDataCollectorW3SVCW3WP *perfdata.Collector
|
||||
w3SVCW3WPPerfDataCollector *perfdata.Collector
|
||||
|
||||
// W3SVC_W3WP
|
||||
threads *prometheus.Desc
|
||||
maximumThreads *prometheus.Desc
|
||||
w3SVCW3WPThreads *prometheus.Desc
|
||||
w3SVCW3WPMaximumThreads *prometheus.Desc
|
||||
|
||||
requestsTotal *prometheus.Desc
|
||||
requestsActive *prometheus.Desc
|
||||
w3SVCW3WPRequestsTotal *prometheus.Desc
|
||||
w3SVCW3WPRequestsActive *prometheus.Desc
|
||||
|
||||
activeFlushedEntries *prometheus.Desc
|
||||
w3SVCW3WPActiveFlushedEntries *prometheus.Desc
|
||||
|
||||
currentFileCacheMemoryUsage *prometheus.Desc
|
||||
maximumFileCacheMemoryUsage *prometheus.Desc
|
||||
fileCacheFlushesTotal *prometheus.Desc
|
||||
fileCacheQueriesTotal *prometheus.Desc
|
||||
fileCacheHitsTotal *prometheus.Desc
|
||||
filesCached *prometheus.Desc
|
||||
filesCachedTotal *prometheus.Desc
|
||||
filesFlushedTotal *prometheus.Desc
|
||||
w3SVCW3WPCurrentFileCacheMemoryUsage *prometheus.Desc
|
||||
w3SVCW3WPMaximumFileCacheMemoryUsage *prometheus.Desc
|
||||
w3SVCW3WPFileCacheFlushesTotal *prometheus.Desc
|
||||
w3SVCW3WPFileCacheQueriesTotal *prometheus.Desc
|
||||
w3SVCW3WPFileCacheHitsTotal *prometheus.Desc
|
||||
w3SVCW3WPFilesCached *prometheus.Desc
|
||||
w3SVCW3WPFilesCachedTotal *prometheus.Desc
|
||||
w3SVCW3WPFilesFlushedTotal *prometheus.Desc
|
||||
|
||||
uriCacheFlushesTotal *prometheus.Desc
|
||||
uriCacheQueriesTotal *prometheus.Desc
|
||||
uriCacheHitsTotal *prometheus.Desc
|
||||
urisCached *prometheus.Desc
|
||||
urisCachedTotal *prometheus.Desc
|
||||
urisFlushedTotal *prometheus.Desc
|
||||
w3SVCW3WPURICacheFlushesTotal *prometheus.Desc
|
||||
w3SVCW3WPURICacheQueriesTotal *prometheus.Desc
|
||||
w3SVCW3WPURICacheHitsTotal *prometheus.Desc
|
||||
w3SVCW3WPURIsCached *prometheus.Desc
|
||||
w3SVCW3WPURIsCachedTotal *prometheus.Desc
|
||||
w3SVCW3WPURIsFlushedTotal *prometheus.Desc
|
||||
|
||||
metadataCached *prometheus.Desc
|
||||
metadataCacheFlushes *prometheus.Desc
|
||||
metadataCacheQueriesTotal *prometheus.Desc
|
||||
metadataCacheHitsTotal *prometheus.Desc
|
||||
metadataCachedTotal *prometheus.Desc
|
||||
metadataFlushedTotal *prometheus.Desc
|
||||
w3SVCW3WPMetadataCached *prometheus.Desc
|
||||
w3SVCW3WPMetadataCacheFlushes *prometheus.Desc
|
||||
w3SVCW3WPMetadataCacheQueriesTotal *prometheus.Desc
|
||||
w3SVCW3WPMetadataCacheHitsTotal *prometheus.Desc
|
||||
w3SVCW3WPMetadataCachedTotal *prometheus.Desc
|
||||
w3SVCW3WPMetadataFlushedTotal *prometheus.Desc
|
||||
|
||||
outputCacheActiveFlushedItems *prometheus.Desc
|
||||
outputCacheItems *prometheus.Desc
|
||||
outputCacheMemoryUsage *prometheus.Desc
|
||||
outputCacheQueriesTotal *prometheus.Desc
|
||||
outputCacheHitsTotal *prometheus.Desc
|
||||
outputCacheFlushedItemsTotal *prometheus.Desc
|
||||
outputCacheFlushesTotal *prometheus.Desc
|
||||
w3SVCW3WPOutputCacheActiveFlushedItems *prometheus.Desc
|
||||
w3SVCW3WPOutputCacheItems *prometheus.Desc
|
||||
w3SVCW3WPOutputCacheMemoryUsage *prometheus.Desc
|
||||
w3SVCW3WPOutputCacheQueriesTotal *prometheus.Desc
|
||||
w3SVCW3WPOutputCacheHitsTotal *prometheus.Desc
|
||||
w3SVCW3WPOutputCacheFlushedItemsTotal *prometheus.Desc
|
||||
w3SVCW3WPOutputCacheFlushesTotal *prometheus.Desc
|
||||
|
||||
// IIS 8+ Only
|
||||
requestErrorsTotal *prometheus.Desc
|
||||
webSocketRequestsActive *prometheus.Desc
|
||||
webSocketConnectionAttempts *prometheus.Desc
|
||||
webSocketConnectionsAccepted *prometheus.Desc
|
||||
webSocketConnectionsRejected *prometheus.Desc
|
||||
w3SVCW3WPRequestErrorsTotal *prometheus.Desc
|
||||
w3SVCW3WPWebSocketRequestsActive *prometheus.Desc
|
||||
w3SVCW3WPWebSocketConnectionAttempts *prometheus.Desc
|
||||
w3SVCW3WPWebSocketConnectionsAccepted *prometheus.Desc
|
||||
w3SVCW3WPWebSocketConnectionsRejected *prometheus.Desc
|
||||
}
|
||||
|
||||
var workerProcessNameExtractor = regexp.MustCompile(`^(\d+)_(.+)$`)
|
||||
|
||||
const (
|
||||
Threads = "Active Threads Count"
|
||||
MaximumThreads = "Maximum Threads Count"
|
||||
w3SVCW3WPThreads = "Active Threads Count"
|
||||
w3SVCW3WPMaximumThreads = "Maximum Threads Count"
|
||||
|
||||
RequestsTotal = "Total HTTP Requests Served"
|
||||
RequestsActive = "Active Requests"
|
||||
w3SVCW3WPRequestsTotal = "Total HTTP Requests Served"
|
||||
w3SVCW3WPRequestsActive = "Active Requests"
|
||||
|
||||
ActiveFlushedEntries = "Active Flushed Entries"
|
||||
w3SVCW3WPActiveFlushedEntries = "Active Flushed Entries"
|
||||
|
||||
CurrentFileCacheMemoryUsage = "Current File Cache Memory Usage"
|
||||
MaximumFileCacheMemoryUsage = "Maximum File Cache Memory Usage"
|
||||
FileCacheFlushesTotal = "File Cache Flushes"
|
||||
FileCacheHitsTotal = "File Cache Hits"
|
||||
FileCacheMissesTotal = "File Cache Misses"
|
||||
FilesCached = "Current Files Cached"
|
||||
FilesCachedTotal = "Total Files Cached"
|
||||
FilesFlushedTotal = "Total Flushed Files"
|
||||
w3SVCW3WPCurrentFileCacheMemoryUsage = "Current File Cache Memory Usage"
|
||||
w3SVCW3WPMaximumFileCacheMemoryUsage = "Maximum File Cache Memory Usage"
|
||||
w3SVCW3WPFileCacheFlushesTotal = "File Cache Flushes"
|
||||
w3SVCW3WPFileCacheHitsTotal = "File Cache Hits"
|
||||
w3SVCW3WPFileCacheMissesTotal = "File Cache Misses"
|
||||
w3SVCW3WPFilesCached = "Current Files Cached"
|
||||
w3SVCW3WPFilesCachedTotal = "Total Files Cached"
|
||||
w3SVCW3WPFilesFlushedTotal = "Total Flushed Files"
|
||||
|
||||
URICacheFlushesTotal = "Total Flushed URIs"
|
||||
URICacheFlushesTotalKernel = "Total Flushed URIs"
|
||||
URIsFlushedTotalKernel = "Kernel: Total Flushed URIs"
|
||||
URICacheHitsTotal = "URI Cache Hits"
|
||||
URICacheHitsTotalKernel = "Kernel: URI Cache Hits"
|
||||
URICacheMissesTotal = "URI Cache Misses"
|
||||
URICacheMissesTotalKernel = "Kernel: URI Cache Misses"
|
||||
URIsCached = "Current URIs Cached"
|
||||
URIsCachedKernel = "Kernel: Current URIs Cached"
|
||||
URIsCachedTotal = "Total URIs Cached"
|
||||
URIsCachedTotalKernel = "Total URIs Cached"
|
||||
URIsFlushedTotal = "Total Flushed URIs"
|
||||
w3SVCW3WPURICacheFlushesTotal = "Total Flushed URIs"
|
||||
w3SVCW3WPURICacheHitsTotal = "URI Cache Hits"
|
||||
w3SVCW3WPURICacheMissesTotal = "URI Cache Misses"
|
||||
w3SVCW3WPURIsCached = "Current URIs Cached"
|
||||
w3SVCW3WPURIsCachedTotal = "Total URIs Cached"
|
||||
w3SVCW3WPURIsFlushedTotal = "Total Flushed URIs"
|
||||
|
||||
MetaDataCacheHits = "Metadata Cache Hits"
|
||||
MetaDataCacheMisses = "Metadata Cache Misses"
|
||||
MetadataCached = "Current Metadata Cached"
|
||||
MetadataCacheFlushes = "Metadata Cache Flushes"
|
||||
MetadataCachedTotal = "Total Metadata Cached"
|
||||
MetadataFlushedTotal = "Total Flushed Metadata"
|
||||
w3SVCW3WPMetaDataCacheHits = "Metadata Cache Hits"
|
||||
w3SVCW3WPMetaDataCacheMisses = "Metadata Cache Misses"
|
||||
w3SVCW3WPMetadataCached = "Current Metadata Cached"
|
||||
w3SVCW3WPMetadataCacheFlushes = "Metadata Cache Flushes"
|
||||
w3SVCW3WPMetadataCachedTotal = "Total Metadata Cached"
|
||||
w3SVCW3WPMetadataFlushedTotal = "Total Flushed Metadata"
|
||||
|
||||
OutputCacheActiveFlushedItems = "Output Cache Current Flushed Items"
|
||||
OutputCacheItems = "Output Cache Current Items"
|
||||
OutputCacheMemoryUsage = "Output Cache Current Memory Usage"
|
||||
OutputCacheHitsTotal = "Output Cache Total Hits"
|
||||
OutputCacheMissesTotal = "Output Cache Total Misses"
|
||||
OutputCacheFlushedItemsTotal = "Output Cache Total Flushed Items"
|
||||
OutputCacheFlushesTotal = "Output Cache Total Flushes"
|
||||
w3SVCW3WPOutputCacheActiveFlushedItems = "Output Cache Current Flushed Items"
|
||||
w3SVCW3WPOutputCacheItems = "Output Cache Current Items"
|
||||
w3SVCW3WPOutputCacheMemoryUsage = "Output Cache Current Memory Usage"
|
||||
w3SVCW3WPOutputCacheHitsTotal = "Output Cache Total Hits"
|
||||
w3SVCW3WPOutputCacheMissesTotal = "Output Cache Total Misses"
|
||||
w3SVCW3WPOutputCacheFlushedItemsTotal = "Output Cache Total Flushed Items"
|
||||
w3SVCW3WPOutputCacheFlushesTotal = "Output Cache Total Flushes"
|
||||
|
||||
// IIS8
|
||||
RequestErrors500 = "% 500 HTTP Response Sent"
|
||||
RequestErrors503 = "% 503 HTTP Response Sent"
|
||||
RequestErrors404 = "% 404 HTTP Response Sent"
|
||||
RequestErrors403 = "% 403 HTTP Response Sent"
|
||||
RequestErrors401 = "% 401 HTTP Response Sent"
|
||||
w3SVCW3WPRequestErrors500 = "% 500 HTTP Response Sent"
|
||||
w3SVCW3WPRequestErrors404 = "% 404 HTTP Response Sent"
|
||||
w3SVCW3WPRequestErrors403 = "% 403 HTTP Response Sent"
|
||||
w3SVCW3WPRequestErrors401 = "% 401 HTTP Response Sent"
|
||||
|
||||
WebSocketRequestsActive = "WebSocket Active Requests"
|
||||
WebSocketConnectionAttempts = "WebSocket Connection Attempts / Sec"
|
||||
WebSocketConnectionsAccepted = "WebSocket Connections Accepted / Sec"
|
||||
WebSocketConnectionsRejected = "WebSocket Connections Rejected / Sec"
|
||||
w3SVCW3WPWebSocketRequestsActive = "WebSocket Active Requests"
|
||||
w3SVCW3WPWebSocketConnectionAttempts = "WebSocket Connection Attempts / Sec"
|
||||
w3SVCW3WPWebSocketConnectionsAccepted = "WebSocket Connections Accepted / Sec"
|
||||
w3SVCW3WPWebSocketConnectionsRejected = "WebSocket Connections Rejected / Sec"
|
||||
)
|
||||
|
||||
func (c *Collector) buildW3SVCW3WP() error {
|
||||
counters := []string{
|
||||
Threads,
|
||||
MaximumThreads,
|
||||
RequestsTotal,
|
||||
RequestsActive,
|
||||
ActiveFlushedEntries,
|
||||
CurrentFileCacheMemoryUsage,
|
||||
MaximumFileCacheMemoryUsage,
|
||||
FileCacheFlushesTotal,
|
||||
FileCacheHitsTotal,
|
||||
FileCacheMissesTotal,
|
||||
FilesCached,
|
||||
FilesCachedTotal,
|
||||
FilesFlushedTotal,
|
||||
URICacheFlushesTotal,
|
||||
URICacheFlushesTotalKernel,
|
||||
URIsFlushedTotalKernel,
|
||||
URICacheHitsTotal,
|
||||
URICacheHitsTotalKernel,
|
||||
URICacheMissesTotal,
|
||||
URICacheMissesTotalKernel,
|
||||
URIsCached,
|
||||
URIsCachedKernel,
|
||||
URIsCachedTotal,
|
||||
URIsCachedTotalKernel,
|
||||
URIsFlushedTotal,
|
||||
MetaDataCacheHits,
|
||||
MetaDataCacheMisses,
|
||||
MetadataCached,
|
||||
MetadataCacheFlushes,
|
||||
MetadataCachedTotal,
|
||||
MetadataFlushedTotal,
|
||||
OutputCacheActiveFlushedItems,
|
||||
OutputCacheItems,
|
||||
OutputCacheMemoryUsage,
|
||||
OutputCacheHitsTotal,
|
||||
OutputCacheMissesTotal,
|
||||
OutputCacheFlushedItemsTotal,
|
||||
OutputCacheFlushesTotal,
|
||||
w3SVCW3WPThreads,
|
||||
w3SVCW3WPMaximumThreads,
|
||||
w3SVCW3WPRequestsTotal,
|
||||
w3SVCW3WPRequestsActive,
|
||||
w3SVCW3WPActiveFlushedEntries,
|
||||
w3SVCW3WPCurrentFileCacheMemoryUsage,
|
||||
w3SVCW3WPMaximumFileCacheMemoryUsage,
|
||||
w3SVCW3WPFileCacheFlushesTotal,
|
||||
w3SVCW3WPFileCacheHitsTotal,
|
||||
w3SVCW3WPFileCacheMissesTotal,
|
||||
w3SVCW3WPFilesCached,
|
||||
w3SVCW3WPFilesCachedTotal,
|
||||
w3SVCW3WPFilesFlushedTotal,
|
||||
w3SVCW3WPURICacheFlushesTotal,
|
||||
w3SVCW3WPURICacheHitsTotal,
|
||||
w3SVCW3WPURICacheMissesTotal,
|
||||
w3SVCW3WPURIsCached,
|
||||
w3SVCW3WPURIsCachedTotal,
|
||||
w3SVCW3WPURIsFlushedTotal,
|
||||
w3SVCW3WPMetaDataCacheHits,
|
||||
w3SVCW3WPMetaDataCacheMisses,
|
||||
w3SVCW3WPMetadataCached,
|
||||
w3SVCW3WPMetadataCacheFlushes,
|
||||
w3SVCW3WPMetadataCachedTotal,
|
||||
w3SVCW3WPMetadataFlushedTotal,
|
||||
w3SVCW3WPOutputCacheActiveFlushedItems,
|
||||
w3SVCW3WPOutputCacheItems,
|
||||
w3SVCW3WPOutputCacheMemoryUsage,
|
||||
w3SVCW3WPOutputCacheHitsTotal,
|
||||
w3SVCW3WPOutputCacheMissesTotal,
|
||||
w3SVCW3WPOutputCacheFlushedItemsTotal,
|
||||
w3SVCW3WPOutputCacheFlushesTotal,
|
||||
}
|
||||
|
||||
if c.iisVersion.major >= 8 {
|
||||
counters = append(counters, []string{
|
||||
RequestErrors500,
|
||||
RequestErrors503,
|
||||
RequestErrors404,
|
||||
RequestErrors403,
|
||||
RequestErrors401,
|
||||
WebSocketRequestsActive,
|
||||
WebSocketConnectionAttempts,
|
||||
WebSocketConnectionsAccepted,
|
||||
WebSocketConnectionsRejected,
|
||||
w3SVCW3WPRequestErrors500,
|
||||
w3SVCW3WPRequestErrors404,
|
||||
w3SVCW3WPRequestErrors403,
|
||||
w3SVCW3WPRequestErrors401,
|
||||
w3SVCW3WPWebSocketRequestsActive,
|
||||
w3SVCW3WPWebSocketConnectionAttempts,
|
||||
w3SVCW3WPWebSocketConnectionsAccepted,
|
||||
w3SVCW3WPWebSocketConnectionsRejected,
|
||||
}...)
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
c.perfDataCollectorW3SVCW3WP, err = perfdata.NewCollector("W3SVC_W3WP", perfdata.InstancesAll, counters)
|
||||
c.w3SVCW3WPPerfDataCollector, err = perfdata.NewCollector("W3SVC_W3WP", perfdata.InstancesAll, counters)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err)
|
||||
}
|
||||
|
||||
// W3SVC_W3WP
|
||||
c.threads = prometheus.NewDesc(
|
||||
c.w3SVCW3WPThreads = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_threads"),
|
||||
"Number of threads actively processing requests in the worker process",
|
||||
[]string{"app", "pid", "state"},
|
||||
nil,
|
||||
)
|
||||
c.maximumThreads = prometheus.NewDesc(
|
||||
c.w3SVCW3WPMaximumThreads = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_max_threads"),
|
||||
"Maximum number of threads to which the thread pool can grow as needed",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.requestsTotal = prometheus.NewDesc(
|
||||
c.w3SVCW3WPRequestsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_requests_total"),
|
||||
"Total number of HTTP requests served by the worker process",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.requestsActive = prometheus.NewDesc(
|
||||
c.w3SVCW3WPRequestsActive = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_current_requests"),
|
||||
"Current number of requests being processed by the worker process",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.activeFlushedEntries = prometheus.NewDesc(
|
||||
c.w3SVCW3WPActiveFlushedEntries = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_cache_active_flushed_entries"),
|
||||
"Number of file handles cached in user-mode that will be closed when all current transfers complete.",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.currentFileCacheMemoryUsage = prometheus.NewDesc(
|
||||
c.w3SVCW3WPCurrentFileCacheMemoryUsage = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_memory_bytes"),
|
||||
"Current number of bytes used by user-mode file cache",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.maximumFileCacheMemoryUsage = prometheus.NewDesc(
|
||||
c.w3SVCW3WPMaximumFileCacheMemoryUsage = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_max_memory_bytes"),
|
||||
"Maximum number of bytes used by user-mode file cache",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.fileCacheFlushesTotal = prometheus.NewDesc(
|
||||
c.w3SVCW3WPFileCacheFlushesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_flushes_total"),
|
||||
"Total number of files removed from the user-mode cache",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.fileCacheQueriesTotal = prometheus.NewDesc(
|
||||
c.w3SVCW3WPFileCacheQueriesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_queries_total"),
|
||||
"Total file cache queries (hits + misses)",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.fileCacheHitsTotal = prometheus.NewDesc(
|
||||
c.w3SVCW3WPFileCacheHitsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_hits_total"),
|
||||
"Total number of successful lookups in the user-mode file cache",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.filesCached = prometheus.NewDesc(
|
||||
c.w3SVCW3WPFilesCached = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_items"),
|
||||
"Current number of files whose contents are present in user-mode cache",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.filesCachedTotal = prometheus.NewDesc(
|
||||
c.w3SVCW3WPFilesCachedTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_items_total"),
|
||||
"Total number of files whose contents were ever added to the user-mode cache (since service startup)",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.filesFlushedTotal = prometheus.NewDesc(
|
||||
c.w3SVCW3WPFilesFlushedTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_items_flushed_total"),
|
||||
"Total number of file handles that have been removed from the user-mode cache (since service startup)",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.uriCacheFlushesTotal = prometheus.NewDesc(
|
||||
c.w3SVCW3WPURICacheFlushesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_flushes_total"),
|
||||
"Total number of URI cache flushes (since service startup)",
|
||||
[]string{"app", "pid"},
|
||||
nil,
|
||||
)
|
||||
c.uriCacheQueriesTotal = prometheus.NewDesc(
|
||||
c.w3SVCW3WPURICacheQueriesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_queries_total"),
|
||||
"Total number of uri cache queries (hits + misses)",
[]string{"app", "pid"},
nil,
)
c.uriCacheHitsTotal = prometheus.NewDesc(
c.w3SVCW3WPURICacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_hits_total"),
"Total number of successful lookups in the user-mode URI cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.urisCached = prometheus.NewDesc(
c.w3SVCW3WPURIsCached = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_items"),
"Number of URI information blocks currently in the user-mode cache",
[]string{"app", "pid"},
nil,
)
c.urisCachedTotal = prometheus.NewDesc(
c.w3SVCW3WPURIsCachedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_items_total"),
"Total number of URI information blocks added to the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.urisFlushedTotal = prometheus.NewDesc(
c.w3SVCW3WPURIsFlushedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_items_flushed_total"),
"The number of URI information blocks that have been removed from the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.metadataCached = prometheus.NewDesc(
c.w3SVCW3WPMetadataCached = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_items"),
"Number of metadata information blocks currently present in user-mode cache",
[]string{"app", "pid"},
nil,
)
c.metadataCacheFlushes = prometheus.NewDesc(
c.w3SVCW3WPMetadataCacheFlushes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_flushes_total"),
"Total number of user-mode metadata cache flushes (since service startup)",
[]string{"app", "pid"},
nil,
)
c.metadataCacheQueriesTotal = prometheus.NewDesc(
c.w3SVCW3WPMetadataCacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_queries_total"),
"Total metadata cache queries (hits + misses)",
[]string{"app", "pid"},
nil,
)
c.metadataCacheHitsTotal = prometheus.NewDesc(
c.w3SVCW3WPMetadataCacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_hits_total"),
"Total number of successful lookups in the user-mode metadata cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.metadataCachedTotal = prometheus.NewDesc(
c.w3SVCW3WPMetadataCachedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_items_cached_total"),
"Total number of metadata information blocks added to the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.metadataFlushedTotal = prometheus.NewDesc(
c.w3SVCW3WPMetadataFlushedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_items_flushed_total"),
"Total number of metadata information blocks removed from the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.outputCacheActiveFlushedItems = prometheus.NewDesc(
c.w3SVCW3WPOutputCacheActiveFlushedItems = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_active_flushed_items"),
"",
[]string{"app", "pid"},
nil,
)
c.outputCacheItems = prometheus.NewDesc(
c.w3SVCW3WPOutputCacheItems = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_items"),
"Number of items current present in output cache",
[]string{"app", "pid"},
nil,
)
c.outputCacheMemoryUsage = prometheus.NewDesc(
c.w3SVCW3WPOutputCacheMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_memory_bytes"),
"Current number of bytes used by output cache",
[]string{"app", "pid"},
nil,
)
c.outputCacheQueriesTotal = prometheus.NewDesc(
c.w3SVCW3WPOutputCacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_queries_total"),
"Total number of output cache queries (hits + misses)",
[]string{"app", "pid"},
nil,
)
c.outputCacheHitsTotal = prometheus.NewDesc(
c.w3SVCW3WPOutputCacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_hits_total"),
"Total number of successful lookups in output cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.outputCacheFlushedItemsTotal = prometheus.NewDesc(
c.w3SVCW3WPOutputCacheFlushedItemsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_items_flushed_total"),
"Total number of items flushed from output cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.outputCacheFlushesTotal = prometheus.NewDesc(
c.w3SVCW3WPOutputCacheFlushesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_flushes_total"),
"Total number of flushes of output cache (since service startup)",
[]string{"app", "pid"},
nil,
)
// W3SVC_W3WP_IIS8
c.requestErrorsTotal = prometheus.NewDesc(
c.w3SVCW3WPRequestErrorsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_request_errors_total"),
"Total number of requests that returned an error",
[]string{"app", "pid", "status_code"},
nil,
)
c.webSocketRequestsActive = prometheus.NewDesc(
c.w3SVCW3WPWebSocketRequestsActive = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_current_websocket_requests"),
"",
[]string{"app", "pid"},
nil,
)
c.webSocketConnectionAttempts = prometheus.NewDesc(
c.w3SVCW3WPWebSocketConnectionAttempts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_websocket_connection_attempts_total"),
"",
[]string{"app", "pid"},
nil,
)
c.webSocketConnectionsAccepted = prometheus.NewDesc(
c.w3SVCW3WPWebSocketConnectionsAccepted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_websocket_connection_accepted_total"),
"",
[]string{"app", "pid"},
nil,
)
c.webSocketConnectionsRejected = prometheus.NewDesc(
c.w3SVCW3WPWebSocketConnectionsRejected = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_websocket_connection_rejected_total"),
"",
[]string{"app", "pid"},
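A note on the pattern in the hunk above, for readers skimming the diff: each prometheus.NewDesc call only registers metadata (fully-qualified name, help text, variable labels); values are attached later, at scrape time. A minimal, self-contained sketch of that pairing, assuming nothing beyond the public client_golang API — the namespace and metric name below are illustrative, not taken from this change:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Descriptor only: name, help, and the variable labels ("app", "pid")
	// that every emitted sample must later supply, in order.
	desc := prometheus.NewDesc(
		prometheus.BuildFQName("windows", "iis", "worker_example_total"), // illustrative name
		"Illustrative counter keyed by app pool and worker process ID",
		[]string{"app", "pid"},
		nil,
	)

	// At collect time one const metric is built per labelled instance.
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42, "DefaultAppPool", "1234")

	fmt.Println(m.Desc()) // prints the descriptor, confirming the pairing
}
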
@@ -429,7 +415,7 @@ func (c *Collector) buildW3SVCW3WP() error {
}

func (c *Collector) collectW3SVCW3WP(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorW3SVCW3WP.Collect()
perfData, err := c.w3SVCW3WPPerfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect APP_POOL_WAS metrics: %w", err)
}
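The perfdata package here is internal to windows_exporter, so the shape of what Collect() returns is not part of any public API; the sketch below is an assumption for illustration only — it stands in a map of object instances to named counters, each carrying a FirstValue, which is how the loop below indexes it:

package main

import "fmt"

// counterValue is a hypothetical stand-in for the internal perfdata
// value type; only FirstValue is used by the collector code in this diff.
type counterValue struct{ FirstValue float64 }

func main() {
	// Assumed shape: one row of named counters per object instance.
	perfData := map[string]map[string]counterValue{
		"DefaultAppPool#1234": {
			"Active Threads Count": {FirstValue: 3},
		},
	}

	for instance, app := range perfData {
		fmt.Println(instance, app["Active Threads Count"].FirstValue)
	}
}
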
@@ -456,297 +442,289 @@ func (c *Collector) collectW3SVCW3WP(ch chan<- prometheus.Metric) error {
}

ch <- prometheus.MustNewConstMetric(
c.threads,
c.w3SVCW3WPThreads,
prometheus.GaugeValue,
app[Threads].FirstValue,
app[w3SVCW3WPThreads].FirstValue,
name,
pid,
"busy",
)
ch <- prometheus.MustNewConstMetric(
c.maximumThreads,
c.w3SVCW3WPMaximumThreads,
prometheus.CounterValue,
app[MaximumThreads].FirstValue,
app[w3SVCW3WPMaximumThreads].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.requestsTotal,
c.w3SVCW3WPRequestsTotal,
prometheus.CounterValue,
app[RequestsTotal].FirstValue,
app[w3SVCW3WPRequestsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.requestsActive,
c.w3SVCW3WPRequestsActive,
prometheus.CounterValue,
app[RequestsActive].FirstValue,
app[w3SVCW3WPRequestsActive].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.activeFlushedEntries,
c.w3SVCW3WPActiveFlushedEntries,
prometheus.GaugeValue,
app[ActiveFlushedEntries].FirstValue,
app[w3SVCW3WPActiveFlushedEntries].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.currentFileCacheMemoryUsage,
c.w3SVCW3WPCurrentFileCacheMemoryUsage,
prometheus.GaugeValue,
app[CurrentFileCacheMemoryUsage].FirstValue,
app[w3SVCW3WPCurrentFileCacheMemoryUsage].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.maximumFileCacheMemoryUsage,
c.w3SVCW3WPMaximumFileCacheMemoryUsage,
prometheus.CounterValue,
app[MaximumFileCacheMemoryUsage].FirstValue,
app[w3SVCW3WPMaximumFileCacheMemoryUsage].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.fileCacheFlushesTotal,
c.w3SVCW3WPFileCacheFlushesTotal,
prometheus.CounterValue,
app[FileCacheFlushesTotal].FirstValue,
app[w3SVCW3WPFileCacheFlushesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.fileCacheQueriesTotal,
c.w3SVCW3WPFileCacheQueriesTotal,
prometheus.CounterValue,
app[FileCacheHitsTotal].FirstValue+app[FileCacheMissesTotal].FirstValue,
app[w3SVCW3WPFileCacheHitsTotal].FirstValue+app[w3SVCW3WPFileCacheMissesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.fileCacheHitsTotal,
c.w3SVCW3WPFileCacheHitsTotal,
prometheus.CounterValue,
app[FileCacheHitsTotal].FirstValue,
app[w3SVCW3WPFileCacheHitsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.filesCached,
c.w3SVCW3WPFilesCached,
prometheus.GaugeValue,
app[FilesCached].FirstValue,
app[w3SVCW3WPFilesCached].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.filesCachedTotal,
c.w3SVCW3WPFilesCachedTotal,
prometheus.CounterValue,
app[FilesCachedTotal].FirstValue,
app[w3SVCW3WPFilesCachedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.filesFlushedTotal,
c.w3SVCW3WPFilesFlushedTotal,
prometheus.CounterValue,
app[FilesFlushedTotal].FirstValue,
app[w3SVCW3WPFilesFlushedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.uriCacheFlushesTotal,
c.w3SVCW3WPURICacheFlushesTotal,
prometheus.CounterValue,
app[URICacheFlushesTotal].FirstValue,
app[w3SVCW3WPURICacheFlushesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.uriCacheQueriesTotal,
c.w3SVCW3WPURICacheQueriesTotal,
prometheus.CounterValue,
app[URICacheHitsTotal].FirstValue+app[URICacheMissesTotal].FirstValue,
app[w3SVCW3WPURICacheHitsTotal].FirstValue+app[w3SVCW3WPURICacheMissesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.uriCacheHitsTotal,
c.w3SVCW3WPURICacheHitsTotal,
prometheus.CounterValue,
app[URICacheHitsTotal].FirstValue,
app[w3SVCW3WPURICacheHitsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.urisCached,
c.w3SVCW3WPURIsCached,
prometheus.GaugeValue,
app[URIsCached].FirstValue,
app[w3SVCW3WPURIsCached].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.urisCachedTotal,
c.w3SVCW3WPURIsCachedTotal,
prometheus.CounterValue,
app[URIsCachedTotal].FirstValue,
app[w3SVCW3WPURIsCachedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.urisFlushedTotal,
c.w3SVCW3WPURIsFlushedTotal,
prometheus.CounterValue,
app[URIsFlushedTotal].FirstValue,
app[w3SVCW3WPURIsFlushedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCached,
c.w3SVCW3WPMetadataCached,
prometheus.GaugeValue,
app[MetadataCached].FirstValue,
app[w3SVCW3WPMetadataCached].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCacheFlushes,
c.w3SVCW3WPMetadataCacheFlushes,
prometheus.CounterValue,
app[MetadataCacheFlushes].FirstValue,
app[w3SVCW3WPMetadataCacheFlushes].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCacheQueriesTotal,
c.w3SVCW3WPMetadataCacheQueriesTotal,
prometheus.CounterValue,
app[MetaDataCacheHits].FirstValue+app[MetaDataCacheMisses].FirstValue,
app[w3SVCW3WPMetaDataCacheHits].FirstValue+app[w3SVCW3WPMetaDataCacheMisses].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCacheHitsTotal,
c.w3SVCW3WPMetadataCacheHitsTotal,
prometheus.CounterValue,
app[MetaDataCacheHits].FirstValue,
app[w3SVCW3WPMetaDataCacheHits].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCachedTotal,
c.w3SVCW3WPMetadataCachedTotal,
prometheus.CounterValue,
app[MetadataCachedTotal].FirstValue,
app[w3SVCW3WPMetadataCachedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataFlushedTotal,
c.w3SVCW3WPMetadataFlushedTotal,
prometheus.CounterValue,
app[MetadataFlushedTotal].FirstValue,
app[w3SVCW3WPMetadataFlushedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheActiveFlushedItems,
c.w3SVCW3WPOutputCacheActiveFlushedItems,
prometheus.CounterValue,
app[OutputCacheActiveFlushedItems].FirstValue,
app[w3SVCW3WPOutputCacheActiveFlushedItems].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheItems,
c.w3SVCW3WPOutputCacheItems,
prometheus.CounterValue,
app[OutputCacheItems].FirstValue,
app[w3SVCW3WPOutputCacheItems].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheMemoryUsage,
c.w3SVCW3WPOutputCacheMemoryUsage,
prometheus.CounterValue,
app[OutputCacheMemoryUsage].FirstValue,
app[w3SVCW3WPOutputCacheMemoryUsage].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheQueriesTotal,
c.w3SVCW3WPOutputCacheQueriesTotal,
prometheus.CounterValue,
app[OutputCacheHitsTotal].FirstValue+app[OutputCacheMissesTotal].FirstValue,
app[w3SVCW3WPOutputCacheHitsTotal].FirstValue+app[w3SVCW3WPOutputCacheMissesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheHitsTotal,
c.w3SVCW3WPOutputCacheHitsTotal,
prometheus.CounterValue,
app[OutputCacheHitsTotal].FirstValue,
app[w3SVCW3WPOutputCacheHitsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheFlushedItemsTotal,
c.w3SVCW3WPOutputCacheFlushedItemsTotal,
prometheus.CounterValue,
app[OutputCacheFlushedItemsTotal].FirstValue,
app[w3SVCW3WPOutputCacheFlushedItemsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheFlushesTotal,
c.w3SVCW3WPOutputCacheFlushesTotal,
prometheus.CounterValue,
app[OutputCacheFlushesTotal].FirstValue,
app[w3SVCW3WPOutputCacheFlushesTotal].FirstValue,
name,
pid,
)

if c.iisVersion.major >= 8 {
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
c.w3SVCW3WPRequestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors401].FirstValue,
app[w3SVCW3WPRequestErrors401].FirstValue,
name,
pid,
"401",
)
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
c.w3SVCW3WPRequestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors403].FirstValue,
app[w3SVCW3WPRequestErrors403].FirstValue,
name,
pid,
"403",
)
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
c.w3SVCW3WPRequestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors404].FirstValue,
app[w3SVCW3WPRequestErrors404].FirstValue,
name,
pid,
"404",
)
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
c.w3SVCW3WPRequestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors500].FirstValue,
app[w3SVCW3WPRequestErrors500].FirstValue,
name,
pid,
"500",
)
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
c.w3SVCW3WPRequestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors503].FirstValue,
app[w3SVCW3WPRequestErrors503].FirstValue,
name,
pid,
"503",
)
ch <- prometheus.MustNewConstMetric(
c.webSocketRequestsActive,
c.w3SVCW3WPWebSocketRequestsActive,
prometheus.CounterValue,
app[WebSocketRequestsActive].FirstValue,
app[w3SVCW3WPWebSocketRequestsActive].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.webSocketConnectionAttempts,
c.w3SVCW3WPWebSocketConnectionAttempts,
prometheus.CounterValue,
app[WebSocketConnectionAttempts].FirstValue,
app[w3SVCW3WPWebSocketConnectionAttempts].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.webSocketConnectionsAccepted,
c.w3SVCW3WPWebSocketConnectionsAccepted,
prometheus.CounterValue,
app[WebSocketConnectionsAccepted].FirstValue,
app[w3SVCW3WPWebSocketConnectionsAccepted].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.webSocketConnectionsRejected,
c.w3SVCW3WPWebSocketConnectionsRejected,
prometheus.CounterValue,
app[WebSocketConnectionsRejected].FirstValue,
app[w3SVCW3WPWebSocketConnectionsRejected].FirstValue,
name,
pid,
)
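One detail worth calling out in the emissions above: the *_queries_total families are not read from a dedicated perf counter but derived as hits + misses. Since both operands are monotonically increasing counters, their sum is itself a valid Prometheus counter value. A small self-contained sketch of the derivation (names hypothetical):

package main

import "fmt"

// queriesTotal derives a cache-query counter from separate hit and miss
// counters, as the collector does for the file, URI, metadata and
// output caches. Monotonic + monotonic stays monotonic.
func queriesTotal(hits, misses float64) float64 {
	return hits + misses
}

func main() {
	fmt.Println(queriesTotal(90, 10)) // 100
}
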
@@ -26,243 +26,243 @@ import (

type collectorWebService struct {
perfDataCollectorWebService *perfdata.Collector

currentAnonymousUsers *prometheus.Desc
currentBlockedAsyncIORequests *prometheus.Desc
currentCGIRequests *prometheus.Desc
currentConnections *prometheus.Desc
currentISAPIExtensionRequests *prometheus.Desc
currentNonAnonymousUsers *prometheus.Desc
serviceUptime *prometheus.Desc
totalBytesReceived *prometheus.Desc
totalBytesSent *prometheus.Desc
totalAnonymousUsers *prometheus.Desc
totalBlockedAsyncIORequests *prometheus.Desc
totalCGIRequests *prometheus.Desc
totalConnectionAttemptsAllInstances *prometheus.Desc
totalRequests *prometheus.Desc
totalFilesReceived *prometheus.Desc
totalFilesSent *prometheus.Desc
totalISAPIExtensionRequests *prometheus.Desc
totalLockedErrors *prometheus.Desc
totalLogonAttempts *prometheus.Desc
totalNonAnonymousUsers *prometheus.Desc
totalNotFoundErrors *prometheus.Desc
totalRejectedAsyncIORequests *prometheus.Desc
webServiceCurrentAnonymousUsers *prometheus.Desc
webServiceCurrentBlockedAsyncIORequests *prometheus.Desc
webServiceCurrentCGIRequests *prometheus.Desc
webServiceCurrentConnections *prometheus.Desc
webServiceCurrentISAPIExtensionRequests *prometheus.Desc
webServiceCurrentNonAnonymousUsers *prometheus.Desc
webServiceServiceUptime *prometheus.Desc
webServiceTotalBytesReceived *prometheus.Desc
webServiceTotalBytesSent *prometheus.Desc
webServiceTotalAnonymousUsers *prometheus.Desc
webServiceTotalBlockedAsyncIORequests *prometheus.Desc
webServiceTotalCGIRequests *prometheus.Desc
webServiceTotalConnectionAttemptsAllInstances *prometheus.Desc
webServiceTotalRequests *prometheus.Desc
webServiceTotalFilesReceived *prometheus.Desc
webServiceTotalFilesSent *prometheus.Desc
webServiceTotalISAPIExtensionRequests *prometheus.Desc
webServiceTotalLockedErrors *prometheus.Desc
webServiceTotalLogonAttempts *prometheus.Desc
webServiceTotalNonAnonymousUsers *prometheus.Desc
webServiceTotalNotFoundErrors *prometheus.Desc
webServiceTotalRejectedAsyncIORequests *prometheus.Desc
}

const (
CurrentAnonymousUsers = "Current Anonymous Users"
CurrentBlockedAsyncIORequests = "Current Blocked Async I/O Requests"
CurrentCGIRequests = "Current CGI Requests"
CurrentConnections = "Current Connections"
CurrentISAPIExtensionRequests = "Current ISAPI Extension Requests"
CurrentNonAnonymousUsers = "Current NonAnonymous Users"
ServiceUptime = "Service Uptime"
TotalBytesReceived = "Total Bytes Received"
TotalBytesSent = "Total Bytes Sent"
TotalAnonymousUsers = "Total Anonymous Users"
TotalBlockedAsyncIORequests = "Total Blocked Async I/O Requests"
TotalCGIRequests = "Total CGI Requests"
TotalConnectionAttemptsAllInstances = "Total Connection Attempts (all instances)"
TotalFilesReceived = "Total Files Received"
TotalFilesSent = "Total Files Sent"
TotalISAPIExtensionRequests = "Total ISAPI Extension Requests"
TotalLockedErrors = "Total Locked Errors"
TotalLogonAttempts = "Total Logon Attempts"
TotalNonAnonymousUsers = "Total NonAnonymous Users"
TotalNotFoundErrors = "Total Not Found Errors"
TotalRejectedAsyncIORequests = "Total Rejected Async I/O Requests"
TotalCopyRequests = "Total Copy Requests"
TotalDeleteRequests = "Total Delete Requests"
TotalGetRequests = "Total Get Requests"
TotalHeadRequests = "Total Head Requests"
TotalLockRequests = "Total Lock Requests"
TotalMkcolRequests = "Total Mkcol Requests"
TotalMoveRequests = "Total Move Requests"
TotalOptionsRequests = "Total Options Requests"
TotalOtherRequests = "Total Other Request Methods"
TotalPostRequests = "Total Post Requests"
TotalPropfindRequests = "Total Propfind Requests"
TotalProppatchRequests = "Total Proppatch Requests"
TotalPutRequests = "Total Put Requests"
TotalSearchRequests = "Total Search Requests"
TotalTraceRequests = "Total Trace Requests"
TotalUnlockRequests = "Total Unlock Requests"
webServiceCurrentAnonymousUsers = "Current Anonymous Users"
webServiceCurrentBlockedAsyncIORequests = "Current Blocked Async I/O Requests"
webServiceCurrentCGIRequests = "Current CGI Requests"
webServiceCurrentConnections = "Current Connections"
webServiceCurrentISAPIExtensionRequests = "Current ISAPI Extension Requests"
webServiceCurrentNonAnonymousUsers = "Current NonAnonymous Users"
webServiceServiceUptime = "Service Uptime"
webServiceTotalBytesReceived = "Total Bytes Received"
webServiceTotalBytesSent = "Total Bytes Sent"
webServiceTotalAnonymousUsers = "Total Anonymous Users"
webServiceTotalBlockedAsyncIORequests = "Total Blocked Async I/O Requests"
webServiceTotalCGIRequests = "Total CGI Requests"
webServiceTotalConnectionAttemptsAllInstances = "Total Connection Attempts (all instances)"
webServiceTotalFilesReceived = "Total Files Received"
webServiceTotalFilesSent = "Total Files Sent"
webServiceTotalISAPIExtensionRequests = "Total ISAPI Extension Requests"
webServiceTotalLockedErrors = "Total Locked Errors"
webServiceTotalLogonAttempts = "Total Logon Attempts"
webServiceTotalNonAnonymousUsers = "Total NonAnonymous Users"
webServiceTotalNotFoundErrors = "Total Not Found Errors"
webServiceTotalRejectedAsyncIORequests = "Total Rejected Async I/O Requests"
webServiceTotalCopyRequests = "Total Copy Requests"
webServiceTotalDeleteRequests = "Total Delete Requests"
webServiceTotalGetRequests = "Total Get Requests"
webServiceTotalHeadRequests = "Total Head Requests"
webServiceTotalLockRequests = "Total Lock Requests"
webServiceTotalMkcolRequests = "Total Mkcol Requests"
webServiceTotalMoveRequests = "Total Move Requests"
webServiceTotalOptionsRequests = "Total Options Requests"
webServiceTotalOtherRequests = "Total Other Request Methods"
webServiceTotalPostRequests = "Total Post Requests"
webServiceTotalPropfindRequests = "Total Propfind Requests"
webServiceTotalProppatchRequests = "Total Proppatch Requests"
webServiceTotalPutRequests = "Total Put Requests"
webServiceTotalSearchRequests = "Total Search Requests"
webServiceTotalTraceRequests = "Total Trace Requests"
webServiceTotalUnlockRequests = "Total Unlock Requests"
)

func (c *Collector) buildWebService() error {
var err error

c.perfDataCollectorWebService, err = perfdata.NewCollector("Web Service", perfdata.InstancesAll, []string{
CurrentAnonymousUsers,
CurrentBlockedAsyncIORequests,
CurrentCGIRequests,
CurrentConnections,
CurrentISAPIExtensionRequests,
CurrentNonAnonymousUsers,
ServiceUptime,
TotalBytesReceived,
TotalBytesSent,
TotalAnonymousUsers,
TotalBlockedAsyncIORequests,
TotalCGIRequests,
TotalConnectionAttemptsAllInstances,
TotalFilesReceived,
TotalFilesSent,
TotalISAPIExtensionRequests,
TotalLockedErrors,
TotalLogonAttempts,
TotalNonAnonymousUsers,
TotalNotFoundErrors,
TotalRejectedAsyncIORequests,
TotalCopyRequests,
TotalDeleteRequests,
TotalGetRequests,
TotalHeadRequests,
TotalLockRequests,
TotalMkcolRequests,
TotalMoveRequests,
TotalOptionsRequests,
TotalOtherRequests,
TotalPostRequests,
TotalPropfindRequests,
TotalProppatchRequests,
TotalPutRequests,
TotalSearchRequests,
TotalTraceRequests,
TotalUnlockRequests,
webServiceCurrentAnonymousUsers,
webServiceCurrentBlockedAsyncIORequests,
webServiceCurrentCGIRequests,
webServiceCurrentConnections,
webServiceCurrentISAPIExtensionRequests,
webServiceCurrentNonAnonymousUsers,
webServiceServiceUptime,
webServiceTotalBytesReceived,
webServiceTotalBytesSent,
webServiceTotalAnonymousUsers,
webServiceTotalBlockedAsyncIORequests,
webServiceTotalCGIRequests,
webServiceTotalConnectionAttemptsAllInstances,
webServiceTotalFilesReceived,
webServiceTotalFilesSent,
webServiceTotalISAPIExtensionRequests,
webServiceTotalLockedErrors,
webServiceTotalLogonAttempts,
webServiceTotalNonAnonymousUsers,
webServiceTotalNotFoundErrors,
webServiceTotalRejectedAsyncIORequests,
webServiceTotalCopyRequests,
webServiceTotalDeleteRequests,
webServiceTotalGetRequests,
webServiceTotalHeadRequests,
webServiceTotalLockRequests,
webServiceTotalMkcolRequests,
webServiceTotalMoveRequests,
webServiceTotalOptionsRequests,
webServiceTotalOtherRequests,
webServiceTotalPostRequests,
webServiceTotalPropfindRequests,
webServiceTotalProppatchRequests,
webServiceTotalPutRequests,
webServiceTotalSearchRequests,
webServiceTotalTraceRequests,
webServiceTotalUnlockRequests,
})
if err != nil {
return fmt.Errorf("failed to create Web Service collector: %w", err)
}

c.currentAnonymousUsers = prometheus.NewDesc(
c.webServiceCurrentAnonymousUsers = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_anonymous_users"),
"Number of users who currently have an anonymous connection using the Web service (WebService.CurrentAnonymousUsers)",
[]string{"site"},
nil,
)
c.currentBlockedAsyncIORequests = prometheus.NewDesc(
c.webServiceCurrentBlockedAsyncIORequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_blocked_async_io_requests"),
"Current requests temporarily blocked due to bandwidth throttling settings (WebService.CurrentBlockedAsyncIORequests)",
[]string{"site"},
nil,
)
c.currentCGIRequests = prometheus.NewDesc(
c.webServiceCurrentCGIRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_cgi_requests"),
"Current number of CGI requests being simultaneously processed by the Web service (WebService.CurrentCGIRequests)",
[]string{"site"},
nil,
)
c.currentConnections = prometheus.NewDesc(
c.webServiceCurrentConnections = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_connections"),
"Current number of connections established with the Web service (WebService.CurrentConnections)",
[]string{"site"},
nil,
)
c.currentISAPIExtensionRequests = prometheus.NewDesc(
c.webServiceCurrentISAPIExtensionRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_isapi_extension_requests"),
"Current number of ISAPI requests being simultaneously processed by the Web service (WebService.CurrentISAPIExtensionRequests)",
[]string{"site"},
nil,
)
c.currentNonAnonymousUsers = prometheus.NewDesc(
c.webServiceCurrentNonAnonymousUsers = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_non_anonymous_users"),
"Number of users who currently have a non-anonymous connection using the Web service (WebService.CurrentNonAnonymousUsers)",
[]string{"site"},
nil,
)
c.serviceUptime = prometheus.NewDesc(
c.webServiceServiceUptime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "service_uptime"),
"Number of seconds the WebService is up (WebService.ServiceUptime)",
[]string{"site"},
nil,
)
c.totalBytesReceived = prometheus.NewDesc(
c.webServiceTotalBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "received_bytes_total"),
"Number of data bytes that have been received by the Web service (WebService.TotalBytesReceived)",
[]string{"site"},
nil,
)
c.totalBytesSent = prometheus.NewDesc(
c.webServiceTotalBytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sent_bytes_total"),
"Number of data bytes that have been sent by the Web service (WebService.TotalBytesSent)",
[]string{"site"},
nil,
)
c.totalAnonymousUsers = prometheus.NewDesc(
c.webServiceTotalAnonymousUsers = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "anonymous_users_total"),
"Total number of users who established an anonymous connection with the Web service (WebService.TotalAnonymousUsers)",
[]string{"site"},
nil,
)
c.totalBlockedAsyncIORequests = prometheus.NewDesc(
c.webServiceTotalBlockedAsyncIORequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "blocked_async_io_requests_total"),
"Total requests temporarily blocked due to bandwidth throttling settings (WebService.TotalBlockedAsyncIORequests)",
[]string{"site"},
nil,
)
c.totalCGIRequests = prometheus.NewDesc(
c.webServiceTotalCGIRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cgi_requests_total"),
"Total CGI requests is the total number of CGI requests (WebService.TotalCGIRequests)",
[]string{"site"},
nil,
)
c.totalConnectionAttemptsAllInstances = prometheus.NewDesc(
c.webServiceTotalConnectionAttemptsAllInstances = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_attempts_all_instances_total"),
"Number of connections that have been attempted using the Web service (WebService.TotalConnectionAttemptsAllInstances)",
[]string{"site"},
nil,
)
c.totalRequests = prometheus.NewDesc(
c.webServiceTotalRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
"Number of HTTP requests (WebService.TotalRequests)",
[]string{"site", "method"},
nil,
)
c.totalFilesReceived = prometheus.NewDesc(
c.webServiceTotalFilesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "files_received_total"),
"Number of files received by the Web service (WebService.TotalFilesReceived)",
[]string{"site"},
nil,
)
c.totalFilesSent = prometheus.NewDesc(
c.webServiceTotalFilesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "files_sent_total"),
"Number of files sent by the Web service (WebService.TotalFilesSent)",
[]string{"site"},
nil,
)
c.totalISAPIExtensionRequests = prometheus.NewDesc(
c.webServiceTotalISAPIExtensionRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ipapi_extension_requests_total"),
"ISAPI Extension Requests received (WebService.TotalISAPIExtensionRequests)",
[]string{"site"},
nil,
)
c.totalLockedErrors = prometheus.NewDesc(
c.webServiceTotalLockedErrors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locked_errors_total"),
"Number of requests that couldn't be satisfied by the server because the requested resource was locked (WebService.TotalLockedErrors)",
[]string{"site"},
nil,
)
c.totalLogonAttempts = prometheus.NewDesc(
c.webServiceTotalLogonAttempts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "logon_attempts_total"),
"Number of logons attempts to the Web Service (WebService.TotalLogonAttempts)",
[]string{"site"},
nil,
)
c.totalNonAnonymousUsers = prometheus.NewDesc(
c.webServiceTotalNonAnonymousUsers = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "non_anonymous_users_total"),
"Number of users who established a non-anonymous connection with the Web service (WebService.TotalNonAnonymousUsers)",
[]string{"site"},
nil,
)
c.totalNotFoundErrors = prometheus.NewDesc(
c.webServiceTotalNotFoundErrors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "not_found_errors_total"),
"Number of requests that couldn't be satisfied by the server because the requested document could not be found (WebService.TotalNotFoundErrors)",
[]string{"site"},
nil,
)
c.totalRejectedAsyncIORequests = prometheus.NewDesc(
c.webServiceTotalRejectedAsyncIORequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rejected_async_io_requests_total"),
"Requests rejected due to bandwidth throttling settings (WebService.TotalRejectedAsyncIORequests)",
[]string{"site"},
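The %w verb in the failure path of buildWebService above wraps rather than stringifies the underlying error, which is what lets callers (for example the tolerant collector-build loop this commit introduces) unwrap and inspect individual failures. A standalone sketch with a hypothetical sentinel error standing in for whatever the perfdata layer actually returns:

package main

import (
	"errors"
	"fmt"
)

// errCounterMissing is a hypothetical sentinel, not a real windows_exporter error.
var errCounterMissing = errors.New("counter not found")

func build() error {
	return fmt.Errorf("failed to create Web Service collector: %w", errCounterMissing)
}

func main() {
	err := build()
	fmt.Println(errors.Is(err, errCounterMissing)) // true: %w keeps the chain intact
}
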
@@ -286,240 +286,240 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
}

ch <- prometheus.MustNewConstMetric(
c.currentAnonymousUsers,
c.webServiceCurrentAnonymousUsers,
prometheus.GaugeValue,
app[CurrentAnonymousUsers].FirstValue,
app[webServiceCurrentAnonymousUsers].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentBlockedAsyncIORequests,
c.webServiceCurrentBlockedAsyncIORequests,
prometheus.GaugeValue,
app[CurrentBlockedAsyncIORequests].FirstValue,
app[webServiceCurrentBlockedAsyncIORequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentCGIRequests,
c.webServiceCurrentCGIRequests,
prometheus.GaugeValue,
app[CurrentCGIRequests].FirstValue,
app[webServiceCurrentCGIRequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentConnections,
c.webServiceCurrentConnections,
prometheus.GaugeValue,
app[CurrentConnections].FirstValue,
app[webServiceCurrentConnections].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentISAPIExtensionRequests,
c.webServiceCurrentISAPIExtensionRequests,
prometheus.GaugeValue,
app[CurrentISAPIExtensionRequests].FirstValue,
app[webServiceCurrentISAPIExtensionRequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentNonAnonymousUsers,
c.webServiceCurrentNonAnonymousUsers,
prometheus.GaugeValue,
app[CurrentNonAnonymousUsers].FirstValue,
app[webServiceCurrentNonAnonymousUsers].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.serviceUptime,
c.webServiceServiceUptime,
prometheus.GaugeValue,
app[ServiceUptime].FirstValue,
app[webServiceServiceUptime].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalBytesReceived,
c.webServiceTotalBytesReceived,
prometheus.CounterValue,
app[TotalBytesReceived].FirstValue,
app[webServiceTotalBytesReceived].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalBytesSent,
c.webServiceTotalBytesSent,
prometheus.CounterValue,
app[TotalBytesSent].FirstValue,
app[webServiceTotalBytesSent].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalAnonymousUsers,
c.webServiceTotalAnonymousUsers,
prometheus.CounterValue,
app[TotalAnonymousUsers].FirstValue,
app[webServiceTotalAnonymousUsers].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalBlockedAsyncIORequests,
c.webServiceTotalBlockedAsyncIORequests,
prometheus.CounterValue,
app[TotalBlockedAsyncIORequests].FirstValue,
app[webServiceTotalBlockedAsyncIORequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalCGIRequests,
c.webServiceTotalCGIRequests,
prometheus.CounterValue,
app[TotalCGIRequests].FirstValue,
app[webServiceTotalCGIRequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalConnectionAttemptsAllInstances,
c.webServiceTotalConnectionAttemptsAllInstances,
prometheus.CounterValue,
app[TotalConnectionAttemptsAllInstances].FirstValue,
app[webServiceTotalConnectionAttemptsAllInstances].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalFilesReceived,
c.webServiceTotalFilesReceived,
prometheus.CounterValue,
app[TotalFilesReceived].FirstValue,
app[webServiceTotalFilesReceived].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalFilesSent,
c.webServiceTotalFilesSent,
prometheus.CounterValue,
app[TotalFilesSent].FirstValue,
app[webServiceTotalFilesSent].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalISAPIExtensionRequests,
c.webServiceTotalISAPIExtensionRequests,
prometheus.CounterValue,
app[TotalISAPIExtensionRequests].FirstValue,
app[webServiceTotalISAPIExtensionRequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalLockedErrors,
c.webServiceTotalLockedErrors,
prometheus.CounterValue,
app[TotalLockedErrors].FirstValue,
app[webServiceTotalLockedErrors].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalLogonAttempts,
c.webServiceTotalLogonAttempts,
prometheus.CounterValue,
app[TotalLogonAttempts].FirstValue,
app[webServiceTotalLogonAttempts].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalNonAnonymousUsers,
c.webServiceTotalNonAnonymousUsers,
prometheus.CounterValue,
app[TotalNonAnonymousUsers].FirstValue,
app[webServiceTotalNonAnonymousUsers].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalNotFoundErrors,
c.webServiceTotalNotFoundErrors,
prometheus.CounterValue,
app[TotalNotFoundErrors].FirstValue,
app[webServiceTotalNotFoundErrors].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalRejectedAsyncIORequests,
c.webServiceTotalRejectedAsyncIORequests,
prometheus.CounterValue,
app[TotalRejectedAsyncIORequests].FirstValue,
app[webServiceTotalRejectedAsyncIORequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalOtherRequests].FirstValue,
app[webServiceTotalOtherRequests].FirstValue,
name,
"other",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalCopyRequests].FirstValue,
app[webServiceTotalCopyRequests].FirstValue,
name,
"COPY",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalDeleteRequests].FirstValue,
app[webServiceTotalDeleteRequests].FirstValue,
name,
"DELETE",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalGetRequests].FirstValue,
app[webServiceTotalGetRequests].FirstValue,
name,
"GET",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalHeadRequests].FirstValue,
app[webServiceTotalHeadRequests].FirstValue,
name,
"HEAD",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalLockRequests].FirstValue,
app[webServiceTotalLockRequests].FirstValue,
name,
"LOCK",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalMkcolRequests].FirstValue,
app[webServiceTotalMkcolRequests].FirstValue,
name,
"MKCOL",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalMoveRequests].FirstValue,
app[webServiceTotalMoveRequests].FirstValue,
name,
"MOVE",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalOptionsRequests].FirstValue,
app[webServiceTotalOptionsRequests].FirstValue,
name,
"OPTIONS",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalPostRequests].FirstValue,
app[webServiceTotalPostRequests].FirstValue,
name,
"POST",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalPropfindRequests].FirstValue,
app[webServiceTotalPropfindRequests].FirstValue,
name,
"PROPFIND",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalProppatchRequests].FirstValue,
app[webServiceTotalProppatchRequests].FirstValue,
name,
"PROPPATCH",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalPutRequests].FirstValue,
app[webServiceTotalPutRequests].FirstValue,
name,
"PUT",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalSearchRequests].FirstValue,
app[webServiceTotalSearchRequests].FirstValue,
name,
"SEARCH",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalTraceRequests].FirstValue,
app[webServiceTotalTraceRequests].FirstValue,
name,
"TRACE",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
c.webServiceTotalRequests,
prometheus.CounterValue,
app[TotalUnlockRequests].FirstValue,
app[webServiceTotalUnlockRequests].FirstValue,
name,
"UNLOCK",
)
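The long run of TotalRequests emissions above fans separate per-method perf counters into a single metric family distinguished by a "method" label value. A self-contained, table-driven sketch of the same idea against the public client_golang API — the metric name, site name, and values below are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"windows_iis_requests_total", // illustrative name
		"Illustrative per-method request counter",
		[]string{"site", "method"},
		nil,
	)

	// Hypothetical raw values, as a perfdata snapshot might report them.
	perMethod := map[string]float64{"GET": 120, "POST": 30, "PUT": 5}

	ch := make(chan prometheus.Metric, len(perMethod))
	for method, v := range perMethod {
		// One const metric per method label value, mirroring the fan-out above.
		ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, v, "Default Web Site", method)
	}
	close(ch)

	for m := range ch {
		fmt.Println(m.Desc())
	}
}
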
@@ -24,7 +24,7 @@ import (
)

type collectorWebServiceCache struct {
perfDataCollectorWebServiceCache *perfdata.Collector
serviceCachePerfDataCollector *perfdata.Collector

serviceCacheActiveFlushedEntries *prometheus.Desc

@@ -61,80 +61,80 @@ type collectorWebServiceCache struct {
}

const (
ServiceCacheActiveFlushedEntries = "Active Flushed Entries"
ServiceCacheCurrentFileCacheMemoryUsage = "Current File Cache Memory Usage"
ServiceCacheMaximumFileCacheMemoryUsage = "Maximum File Cache Memory Usage"
ServiceCacheFileCacheFlushesTotal = "File Cache Flushes"
ServiceCacheFileCacheHitsTotal = "File Cache Hits"
ServiceCacheFileCacheMissesTotal = "File Cache Misses"
ServiceCacheFilesCached = "Current Files Cached"
ServiceCacheFilesCachedTotal = "Total Files Cached"
ServiceCacheFilesFlushedTotal = "Total Flushed Files"
ServiceCacheURICacheFlushesTotal = "Total Flushed URIs"
ServiceCacheURICacheFlushesTotalKernel = "Total Flushed URIs"
ServiceCacheURIsFlushedTotalKernel = "Kernel: Total Flushed URIs"
ServiceCacheURICacheHitsTotal = "URI Cache Hits"
ServiceCacheURICacheHitsTotalKernel = "Kernel: URI Cache Hits"
ServiceCacheURICacheMissesTotal = "URI Cache Misses"
ServiceCacheURICacheMissesTotalKernel = "Kernel: URI Cache Misses"
ServiceCacheURIsCached = "Current URIs Cached"
ServiceCacheURIsCachedKernel = "Kernel: Current URIs Cached"
ServiceCacheURIsCachedTotal = "Total URIs Cached"
ServiceCacheURIsCachedTotalKernel = "Total URIs Cached"
ServiceCacheURIsFlushedTotal = "Total Flushed URIs"
ServiceCacheMetaDataCacheHits = "Metadata Cache Hits"
ServiceCacheMetaDataCacheMisses = "Metadata Cache Misses"
ServiceCacheMetadataCached = "Current Metadata Cached"
ServiceCacheMetadataCacheFlushes = "Metadata Cache Flushes"
ServiceCacheMetadataCachedTotal = "Total Metadata Cached"
ServiceCacheMetadataFlushedTotal = "Total Flushed Metadata"
ServiceCacheOutputCacheActiveFlushedItems = "Output Cache Current Flushed Items"
ServiceCacheOutputCacheItems = "Output Cache Current Items"
ServiceCacheOutputCacheMemoryUsage = "Output Cache Current Memory Usage"
ServiceCacheOutputCacheHitsTotal = "Output Cache Total Hits"
ServiceCacheOutputCacheMissesTotal = "Output Cache Total Misses"
ServiceCacheOutputCacheFlushedItemsTotal = "Output Cache Total Flushed Items"
ServiceCacheOutputCacheFlushesTotal = "Output Cache Total Flushes"
serviceCacheActiveFlushedEntries = "Active Flushed Entries"
serviceCacheCurrentFileCacheMemoryUsage = "Current File Cache Memory Usage"
serviceCacheMaximumFileCacheMemoryUsage = "Maximum File Cache Memory Usage"
serviceCacheFileCacheFlushesTotal = "File Cache Flushes"
serviceCacheFileCacheHitsTotal = "File Cache Hits"
serviceCacheFileCacheMissesTotal = "File Cache Misses"
serviceCacheFilesCached = "Current Files Cached"
serviceCacheFilesCachedTotal = "Total Files Cached"
serviceCacheFilesFlushedTotal = "Total Flushed Files"
serviceCacheURICacheFlushesTotal = "Total Flushed URIs"
serviceCacheURICacheFlushesTotalKernel = "Total Flushed URIs"
serviceCacheURIsFlushedTotalKernel = "Kernel: Total Flushed URIs"
serviceCacheURICacheHitsTotal = "URI Cache Hits"
serviceCacheURICacheHitsTotalKernel = "Kernel: URI Cache Hits"
serviceCacheURICacheMissesTotal = "URI Cache Misses"
serviceCacheURICacheMissesTotalKernel = "Kernel: URI Cache Misses"
serviceCacheURIsCached = "Current URIs Cached"
serviceCacheURIsCachedKernel = "Kernel: Current URIs Cached"
serviceCacheURIsCachedTotal = "Total URIs Cached"
serviceCacheURIsCachedTotalKernel = "Total URIs Cached"
serviceCacheURIsFlushedTotal = "Total Flushed URIs"
serviceCacheMetaDataCacheHits = "Metadata Cache Hits"
serviceCacheMetaDataCacheMisses = "Metadata Cache Misses"
serviceCacheMetadataCached = "Current Metadata Cached"
serviceCacheMetadataCacheFlushes = "Metadata Cache Flushes"
serviceCacheMetadataCachedTotal = "Total Metadata Cached"
serviceCacheMetadataFlushedTotal = "Total Flushed Metadata"
serviceCacheOutputCacheActiveFlushedItems = "Output Cache Current Flushed Items"
serviceCacheOutputCacheItems = "Output Cache Current Items"
serviceCacheOutputCacheMemoryUsage = "Output Cache Current Memory Usage"
serviceCacheOutputCacheHitsTotal = "Output Cache Total Hits"
serviceCacheOutputCacheMissesTotal = "Output Cache Total Misses"
serviceCacheOutputCacheFlushedItemsTotal = "Output Cache Total Flushed Items"
serviceCacheOutputCacheFlushesTotal = "Output Cache Total Flushes"
)

func (c *Collector) buildWebServiceCache() error {
var err error

c.perfDataCollectorWebService, err = perfdata.NewCollector("Web Service Cache", perfdata.InstancesAll, []string{
ServiceCacheActiveFlushedEntries,
ServiceCacheCurrentFileCacheMemoryUsage,
ServiceCacheMaximumFileCacheMemoryUsage,
ServiceCacheFileCacheFlushesTotal,
ServiceCacheFileCacheHitsTotal,
ServiceCacheFileCacheMissesTotal,
ServiceCacheFilesCached,
ServiceCacheFilesCachedTotal,
ServiceCacheFilesFlushedTotal,
ServiceCacheURICacheFlushesTotal,
ServiceCacheURICacheFlushesTotalKernel,
ServiceCacheURIsFlushedTotalKernel,
ServiceCacheURICacheHitsTotal,
ServiceCacheURICacheHitsTotalKernel,
ServiceCacheURICacheMissesTotal,
ServiceCacheURICacheMissesTotalKernel,
ServiceCacheURIsCached,
ServiceCacheURIsCachedKernel,
ServiceCacheURIsCachedTotal,
ServiceCacheURIsCachedTotalKernel,
ServiceCacheURIsFlushedTotal,
ServiceCacheMetaDataCacheHits,
ServiceCacheMetaDataCacheMisses,
ServiceCacheMetadataCached,
ServiceCacheMetadataCacheFlushes,
ServiceCacheMetadataCachedTotal,
ServiceCacheMetadataFlushedTotal,
ServiceCacheOutputCacheActiveFlushedItems,
ServiceCacheOutputCacheItems,
ServiceCacheOutputCacheMemoryUsage,
ServiceCacheOutputCacheHitsTotal,
ServiceCacheOutputCacheMissesTotal,
ServiceCacheOutputCacheFlushedItemsTotal,
ServiceCacheOutputCacheFlushesTotal,
serviceCacheActiveFlushedEntries,
serviceCacheCurrentFileCacheMemoryUsage,
serviceCacheMaximumFileCacheMemoryUsage,
serviceCacheFileCacheFlushesTotal,
serviceCacheFileCacheHitsTotal,
serviceCacheFileCacheMissesTotal,
serviceCacheFilesCached,
serviceCacheFilesCachedTotal,
serviceCacheFilesFlushedTotal,
serviceCacheURICacheFlushesTotal,
serviceCacheURICacheFlushesTotalKernel,
serviceCacheURIsFlushedTotalKernel,
serviceCacheURICacheHitsTotal,
serviceCacheURICacheHitsTotalKernel,
serviceCacheURICacheMissesTotal,
serviceCacheURICacheMissesTotalKernel,
serviceCacheURIsCached,
serviceCacheURIsCachedKernel,
serviceCacheURIsCachedTotal,
serviceCacheURIsCachedTotalKernel,
serviceCacheURIsFlushedTotal,
serviceCacheMetaDataCacheHits,
serviceCacheMetaDataCacheMisses,
serviceCacheMetadataCached,
serviceCacheMetadataCacheFlushes,
serviceCacheMetadataCachedTotal,
serviceCacheMetadataFlushedTotal,
serviceCacheOutputCacheActiveFlushedItems,
serviceCacheOutputCacheItems,
serviceCacheOutputCacheMemoryUsage,
serviceCacheOutputCacheHitsTotal,
serviceCacheOutputCacheMissesTotal,
serviceCacheOutputCacheFlushedItemsTotal,
serviceCacheOutputCacheFlushesTotal,
})
if err != nil {
return fmt.Errorf("failed to create Web Service Cache collector: %w", err)
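In the cache collector below, the user-mode and kernel-mode variants of the same perf counter are folded into one metric family distinguished by a mode label ("user"/"kernel") rather than published under two metric names. A minimal sketch of that labelling choice — metric name and values are illustrative, not taken from this change:

package main

import "fmt"

func main() {
	// Hypothetical raw counters for the two cache layers, keyed by mode.
	uriCacheFlushes := map[string]float64{
		"user":   17, // "Total Flushed URIs"
		"kernel": 4,  // "Kernel: Total Flushed URIs"
	}

	// One labelled series per mode under a single family, instead of two names.
	for mode, v := range uriCacheFlushes {
		fmt.Printf("windows_iis_server_cache_uri_flushes_total{mode=%q} %v\n", mode, v)
	}
}
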
@@ -329,184 +329,184 @@ func (c *Collector) collectWebServiceCache(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.serviceCacheActiveFlushedEntries,
prometheus.GaugeValue,
app[ServiceCacheActiveFlushedEntries].FirstValue,
app[serviceCacheActiveFlushedEntries].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheCurrentFileCacheMemoryUsage,
prometheus.GaugeValue,
app[ServiceCacheCurrentFileCacheMemoryUsage].FirstValue,
app[serviceCacheCurrentFileCacheMemoryUsage].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMaximumFileCacheMemoryUsage,
prometheus.CounterValue,
app[ServiceCacheMaximumFileCacheMemoryUsage].FirstValue,
app[serviceCacheMaximumFileCacheMemoryUsage].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheFlushesTotal,
prometheus.CounterValue,
app[ServiceCacheFileCacheFlushesTotal].FirstValue,
app[serviceCacheFileCacheFlushesTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheQueriesTotal,
prometheus.CounterValue,
app[ServiceCacheFileCacheHitsTotal].FirstValue+app[ServiceCacheFileCacheMissesTotal].FirstValue,
app[serviceCacheFileCacheHitsTotal].FirstValue+app[serviceCacheFileCacheMissesTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheHitsTotal,
prometheus.CounterValue,
app[ServiceCacheFileCacheHitsTotal].FirstValue,
app[serviceCacheFileCacheHitsTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesCached,
prometheus.GaugeValue,
app[ServiceCacheFilesCached].FirstValue,
app[serviceCacheFilesCached].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesCachedTotal,
prometheus.CounterValue,
app[ServiceCacheFilesCachedTotal].FirstValue,
app[serviceCacheFilesCachedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesFlushedTotal,
prometheus.CounterValue,
app[ServiceCacheFilesFlushedTotal].FirstValue,
app[serviceCacheFilesFlushedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheFlushesTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheFlushesTotal].FirstValue,
app[serviceCacheURICacheFlushesTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheFlushesTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheFlushesTotalKernel].FirstValue,
app[serviceCacheURICacheFlushesTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheQueriesTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheHitsTotal].FirstValue+app[ServiceCacheURICacheMissesTotal].FirstValue,
app[serviceCacheURICacheHitsTotal].FirstValue+app[serviceCacheURICacheMissesTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheQueriesTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheHitsTotalKernel].FirstValue+app[ServiceCacheURICacheMissesTotalKernel].FirstValue,
app[serviceCacheURICacheHitsTotalKernel].FirstValue+app[serviceCacheURICacheMissesTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheHitsTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheHitsTotal].FirstValue,
app[serviceCacheURICacheHitsTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheHitsTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheHitsTotalKernel].FirstValue,
app[serviceCacheURICacheHitsTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCached,
prometheus.GaugeValue,
app[ServiceCacheURIsCached].FirstValue,
app[serviceCacheURIsCached].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCached,
prometheus.GaugeValue,
app[ServiceCacheURIsCachedKernel].FirstValue,
app[serviceCacheURIsCachedKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCachedTotal,
prometheus.CounterValue,
app[ServiceCacheURIsCachedTotal].FirstValue,
app[serviceCacheURIsCachedTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCachedTotal,
prometheus.CounterValue,
app[ServiceCacheURIsCachedTotalKernel].FirstValue,
app[serviceCacheURIsCachedTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsFlushedTotal,
prometheus.CounterValue,
app[ServiceCacheURIsFlushedTotal].FirstValue,
app[serviceCacheURIsFlushedTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsFlushedTotal,
prometheus.CounterValue,
app[ServiceCacheURIsFlushedTotalKernel].FirstValue,
app[serviceCacheURIsFlushedTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCached,
prometheus.GaugeValue,
app[ServiceCacheMetadataCached].FirstValue,
app[serviceCacheMetadataCached].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheFlushes,
prometheus.CounterValue,
app[ServiceCacheMetadataCacheFlushes].FirstValue,
app[serviceCacheMetadataCacheFlushes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheQueriesTotal,
prometheus.CounterValue,
app[ServiceCacheMetaDataCacheHits].FirstValue+app[ServiceCacheMetaDataCacheMisses].FirstValue,
app[serviceCacheMetaDataCacheHits].FirstValue+app[serviceCacheMetaDataCacheMisses].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheHitsTotal,
prometheus.CounterValue,
0, // app[ServiceCacheMetadataCacheHitsTotal].FirstValue,
0, // app[serviceCacheMetadataCacheHitsTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCachedTotal,
prometheus.CounterValue,
app[ServiceCacheMetadataCachedTotal].FirstValue,
app[serviceCacheMetadataCachedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataFlushedTotal,
prometheus.CounterValue,
app[ServiceCacheMetadataFlushedTotal].FirstValue,
app[serviceCacheMetadataFlushedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheActiveFlushedItems,
prometheus.CounterValue,
app[ServiceCacheOutputCacheActiveFlushedItems].FirstValue,
app[serviceCacheOutputCacheActiveFlushedItems].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheItems,
prometheus.CounterValue,
|
||||
app[ServiceCacheOutputCacheItems].FirstValue,
|
||||
app[serviceCacheOutputCacheItems].FirstValue,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.serviceCacheOutputCacheMemoryUsage,
|
||||
prometheus.CounterValue,
|
||||
app[ServiceCacheOutputCacheMemoryUsage].FirstValue,
|
||||
app[serviceCacheOutputCacheMemoryUsage].FirstValue,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.serviceCacheOutputCacheQueriesTotal,
|
||||
prometheus.CounterValue,
|
||||
app[ServiceCacheOutputCacheHitsTotal].FirstValue+app[ServiceCacheOutputCacheMissesTotal].FirstValue,
|
||||
app[serviceCacheOutputCacheHitsTotal].FirstValue+app[serviceCacheOutputCacheMissesTotal].FirstValue,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.serviceCacheOutputCacheHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
app[ServiceCacheOutputCacheHitsTotal].FirstValue,
|
||||
app[serviceCacheOutputCacheHitsTotal].FirstValue,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.serviceCacheOutputCacheFlushedItemsTotal,
|
||||
prometheus.CounterValue,
|
||||
app[ServiceCacheOutputCacheFlushedItemsTotal].FirstValue,
|
||||
app[serviceCacheOutputCacheFlushedItemsTotal].FirstValue,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.serviceCacheOutputCacheFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
app[ServiceCacheOutputCacheFlushesTotal].FirstValue,
|
||||
app[serviceCacheOutputCacheFlushesTotal].FirstValue,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -27,6 +27,7 @@ import (

const Name = "license"

//nolint:gochecknoglobals
var labelMap = map[slc.SL_GENUINE_STATE]string{
slc.SL_GEN_STATE_IS_GENUINE: "genuine",
slc.SL_GEN_STATE_INVALID_LICENSE: "invalid_license",
@@ -37,6 +38,7 @@ var labelMap = map[slc.SL_GENUINE_STATE]string{

type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.

@@ -39,6 +39,7 @@ type Config struct {
VolumeExclude *regexp.Regexp `yaml:"volume_exclude"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
VolumeInclude: types.RegExpAny,
VolumeExclude: types.RegExpEmpty,

@@ -30,6 +30,7 @@ const Name = "logon"

type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI metrics.

@@ -35,6 +35,7 @@ const Name = "memory"

type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for perflib Memory metrics.
@@ -430,7 +431,7 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
data, ok := perfData[perfdata.InstanceEmpty]

if !ok {
return errors.New("perflib query for Memory returned empty result set")
return fmt.Errorf("failed to collect Memory metrics: %w", types.ErrNoData)
}

ch <- prometheus.MustNewConstMetric(
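
The swap above from a bespoke error string to a wrapped sentinel is what lets callers tolerate empty result sets selectively. A minimal sketch of the pattern, with a local ErrNoData standing in for the exporter's types.ErrNoData:

package main

import (
	"errors"
	"fmt"
)

// ErrNoData mirrors the sentinel the collectors wrap with %w.
var ErrNoData = errors.New("no data")

func collect() error {
	// Wrapping with %w keeps the sentinel recoverable via errors.Is,
	// while the message still names the failing collector.
	return fmt.Errorf("failed to collect Memory metrics: %w", ErrNoData)
}

func main() {
	if err := collect(); errors.Is(err, ErrNoData) {
		// Tolerated: an empty result set is not treated as a scrape failure.
		fmt.Println("skipping:", err)
	}
}
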

@@ -42,6 +42,7 @@ type Config struct {
CollectorsEnabled []string `yaml:"collectors_enabled"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
CollectorsEnabled: []string{
subCollectorCluster,

@@ -662,6 +662,11 @@ func (c *Collector) buildCluster() error {
nil,
)

var dst []msClusterCluster
if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, c.clusterMIQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}

return nil
}
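
Each mscluster build function now issues its MI query once up front, so a broken root/MSCluster namespace surfaces at build time rather than on every scrape. A rough sketch of that probe-at-build shape; the querier interface here is a stand-in, not the real internal/mi API:

package main

import (
	"errors"
	"fmt"
)

// querier is a stand-in for the exporter's *mi.Session.
type querier interface {
	Query(dst any, namespace, query string) error
}

// buildProbe runs the query once during Build; failures are reported
// immediately instead of being deferred to the first Collect call.
func buildProbe(s querier, namespace, query string) error {
	var dst []struct{}
	if err := s.Query(&dst, namespace, query); err != nil {
		return fmt.Errorf("WMI query failed: %w", err)
	}

	return nil
}

type badSession struct{}

func (badSession) Query(any, string, string) error { return errors.New("namespace not found") }

func main() {
	fmt.Println(buildProbe(badSession{}, `root/MSCluster`, "SELECT * FROM MSCluster_Cluster"))
}
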

@@ -86,6 +86,12 @@ func (c *Collector) buildNetwork() error {
nil,
)

var dst []msClusterNetwork

if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, c.networkMIQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}

return nil
}

@@ -158,6 +158,12 @@ func (c *Collector) buildNode() error {
nil,
)

var dst []msClusterNode

if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, c.nodeMIQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}

return nil
}

@@ -190,6 +190,12 @@ func (c *Collector) buildResource() error {
nil,
)

var dst []msClusterResource

if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, c.resourceMIQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}

return nil
}

@@ -164,6 +164,12 @@ func (c *Collector) buildResourceGroup() error {
nil,
)

var dst []msClusterResourceGroup

if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, c.resourceGroupMIQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}

return nil
}

@@ -16,8 +16,8 @@
package msmq

const (
BytesInJournalQueue = "Bytes in Journal Queue"
BytesInQueue = "Bytes in Queue"
MessagesInJournalQueue = "Messages in Journal Queue"
MessagesInQueue = "Messages in Queue"
bytesInJournalQueue = "Bytes in Journal Queue"
bytesInQueue = "Bytes in Queue"
messagesInJournalQueue = "Messages in Journal Queue"
messagesInQueue = "Messages in Queue"
)

@@ -30,6 +30,7 @@ const Name = "msmq"

type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics.
@@ -75,10 +76,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error

c.perfDataCollector, err = perfdata.NewCollector("MSMQ Queue", perfdata.InstancesAll, []string{
BytesInJournalQueue,
BytesInQueue,
MessagesInJournalQueue,
MessagesInQueue,
bytesInJournalQueue,
bytesInQueue,
messagesInJournalQueue,
messagesInQueue,
})
if err != nil {
return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
@@ -124,28 +125,28 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.bytesInJournalQueue,
prometheus.GaugeValue,
data[BytesInJournalQueue].FirstValue,
data[bytesInJournalQueue].FirstValue,
name,
)

ch <- prometheus.MustNewConstMetric(
c.bytesInQueue,
prometheus.GaugeValue,
data[BytesInQueue].FirstValue,
data[bytesInQueue].FirstValue,
name,
)

ch <- prometheus.MustNewConstMetric(
c.messagesInJournalQueue,
prometheus.GaugeValue,
data[MessagesInJournalQueue].FirstValue,
data[messagesInJournalQueue].FirstValue,
name,
)

ch <- prometheus.MustNewConstMetric(
c.messagesInQueue,
prometheus.GaugeValue,
data[MessagesInQueue].FirstValue,
data[messagesInQueue].FirstValue,
name,
)
}

@@ -59,6 +59,7 @@ type Config struct {
Port uint16 `yaml:"port"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
CollectorsEnabled: []string{
subCollectorAccessMethods,
@@ -175,7 +176,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {

fileVersion, productVersion, err := c.getMSSQLServerVersion(c.config.Port)
if err != nil {
logger.Warn("Failed to get MSSQL server version",
logger.Warn("failed to get MSSQL server version",
slog.Any("err", err),
slog.String("collector", Name),
)
@@ -334,7 +335,7 @@ func (c *Collector) getMSSQLInstances() mssqlInstancesType {

k, err := registry.OpenKey(registry.LOCAL_MACHINE, regKey, registry.QUERY_VALUE)
if err != nil {
c.logger.Warn("Couldn't open registry to determine SQL instances",
c.logger.Warn("couldn't open registry to determine SQL instances",
slog.Any("err", err),
)

@@ -343,7 +344,7 @@ func (c *Collector) getMSSQLInstances() mssqlInstancesType {

defer func(key registry.Key) {
if err := key.Close(); err != nil {
c.logger.Warn("Failed to close registry key",
c.logger.Warn("failed to close registry key",
slog.Any("err", err),
)
}
@@ -351,7 +352,7 @@ func (c *Collector) getMSSQLInstances() mssqlInstancesType {

instanceNames, err := k.ReadValueNames(0)
if err != nil {
c.logger.Warn("Can't ReadSubKeyNames",
c.logger.Warn("can't ReadSubKeyNames",
slog.Any("err", err),
)

@@ -364,7 +365,7 @@ func (c *Collector) getMSSQLInstances() mssqlInstancesType {
}
}

c.logger.Debug(fmt.Sprintf("Detected MSSQL Instances: %#v\n", sqlInstances))
c.logger.Debug(fmt.Sprintf("detected MSSQL Instances: %#v\n", sqlInstances))

return sqlInstances
}
@@ -401,23 +402,23 @@ func (c *Collector) collect(
begin := time.Now()
success := 1.0
err := collectFn(ch, sqlInstance, perfDataCollector)
duration := time.Since(begin).Seconds()
duration := time.Since(begin)

if err != nil {
if err != nil && !errors.Is(err, perfdata.ErrNoData) {
errs = append(errs, err)
success = 0.0

c.logger.Error(fmt.Sprintf("mssql class collector %s failed after %fs", collector, duration),
c.logger.Error(fmt.Sprintf("mssql class collector %s for instance %s failed after %s", collector, sqlInstance, duration),
slog.Any("err", err),
)
} else {
c.logger.Debug(fmt.Sprintf("mssql class collector %s succeeded after %fs.", collector, duration))
c.logger.Debug(fmt.Sprintf("mssql class collector %s for instance %s succeeded after %s.", collector, sqlInstance, duration))
}

ch <- prometheus.MustNewConstMetric(
c.mssqlScrapeDurationDesc,
prometheus.GaugeValue,
duration,
duration.Seconds(),
collector, sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
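
Keeping the measurement as a time.Duration until the metric is emitted gives readable log output (%s prints e.g. 10ms) while the gauge still receives seconds. A condensed, hypothetical sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	begin := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for collectFn

	duration := time.Since(begin)

	// %s on a time.Duration yields "10.2ms" rather than a raw float.
	fmt.Printf("collector succeeded after %s\n", duration)

	// Only the metric emission converts to seconds.
	fmt.Printf("scrape_duration_seconds: %f\n", duration.Seconds())
}
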

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -123,6 +124,7 @@ func (c *Collector) buildAccessMethods() error {
var err error

c.accessMethodsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
accessMethodsAUCleanupbatchesPerSec,
accessMethodsAUCleanupsPerSec,
@@ -173,7 +175,7 @@ func (c *Collector) buildAccessMethods() error {
for sqlInstance := range c.mssqlInstances {
c.accessMethodsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Access Methods"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create AccessMethods collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create AccessMethods collector for instance %s: %w", sqlInstance, err))
}
}

@@ -443,7 +445,7 @@ func (c *Collector) buildAccessMethods() error {
nil,
)

return nil
return errors.Join(errs...)
}
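
Rather than aborting on the first instance that fails, the build loop now accumulates one error per instance and joins them at the end; errors.Join returns nil when nothing failed, so the success path is unchanged. A minimal sketch of the accumulate-then-join pattern:

package main

import (
	"errors"
	"fmt"
)

func buildAll(instances []string) error {
	errs := make([]error, 0, len(instances))

	for _, inst := range instances {
		if err := buildOne(inst); err != nil {
			// Record the failure and keep building the remaining instances.
			errs = append(errs, fmt.Errorf("failed to create collector for instance %s: %w", inst, err))
		}
	}

	// errors.Join returns nil when errs is empty.
	return errors.Join(errs...)
}

func buildOne(inst string) error {
	if inst == "BROKEN" {
		return errors.New("perf object missing")
	}

	return nil
}

func main() {
	fmt.Println(buildAll([]string{"MSSQLSERVER", "BROKEN", "SQLEXPRESS"}))
}
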

func (c *Collector) collectAccessMethods(ch chan<- prometheus.Metric) error {
@@ -451,6 +453,10 @@ func (c *Collector) collectAccessMethods(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -54,6 +55,7 @@ func (c *Collector) buildAvailabilityReplica() error {
var err error

c.availabilityReplicaPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
availReplicaBytesReceivedFromReplicaPerSec,
availReplicaBytesSentToReplicaPerSec,
@@ -69,7 +71,7 @@ func (c *Collector) buildAvailabilityReplica() error {
for sqlInstance := range c.mssqlInstances {
c.availabilityReplicaPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), perfdata.InstancesAll, counters)
if err != nil {
return fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance, err))
}
}

@@ -129,7 +131,7 @@ func (c *Collector) buildAvailabilityReplica() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectAvailabilityReplica(ch chan<- prometheus.Metric) error {
@@ -137,6 +139,10 @@ func (c *Collector) collectAvailabilityReplica(ch chan<- prometheus.Metric) erro
}

func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -81,6 +82,7 @@ func (c *Collector) buildBufferManager() error {
var err error

c.bufManPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
bufManBackgroundWriterPagesPerSec,
bufManBufferCacheHitRatio,
@@ -110,7 +112,7 @@ func (c *Collector) buildBufferManager() error {
for sqlInstance := range c.mssqlInstances {
c.bufManPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create Buffer Manager collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create Buffer Manager collector for instance %s: %w", sqlInstance, err))
}
}

@@ -253,7 +255,7 @@ func (c *Collector) buildBufferManager() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectBufferManager(ch chan<- prometheus.Metric) error {
@@ -261,6 +263,10 @@ func (c *Collector) collectBufferManager(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -131,6 +132,7 @@ func (c *Collector) buildDatabases() error {
var err error

c.databasesPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
databasesActiveParallelRedoThreads,
databasesActiveTransactions,
@@ -185,7 +187,7 @@ func (c *Collector) buildDatabases() error {
for sqlInstance := range c.mssqlInstances {
c.databasesPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Databases"), perfdata.InstancesAll, counters)
if err != nil {
return fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance, err))
}
}

@@ -478,7 +480,7 @@ func (c *Collector) buildDatabases() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectDatabases(ch chan<- prometheus.Metric) error {
@@ -486,6 +488,10 @@ func (c *Collector) collectDatabases(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -83,6 +84,7 @@ func (c *Collector) buildDatabaseReplica() error {
var err error

c.dbReplicaPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
dbReplicaDatabaseFlowControlDelay,
dbReplicaDatabaseFlowControlsPerSec,
@@ -113,7 +115,7 @@ func (c *Collector) buildDatabaseReplica() error {
for sqlInstance := range c.mssqlInstances {
c.dbReplicaPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), perfdata.InstancesAll, counters)
if err != nil {
return fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance, err))
}
}

@@ -263,7 +265,7 @@ func (c *Collector) buildDatabaseReplica() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectDatabaseReplica(ch chan<- prometheus.Metric) error {
@@ -271,6 +273,10 @@ func (c *Collector) collectDatabaseReplica(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -83,6 +84,7 @@ func (c *Collector) buildGeneralStatistics() error {
var err error

c.genStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
genStatsActiveTempTables,
genStatsConnectionResetPerSec,
@@ -113,7 +115,7 @@ func (c *Collector) buildGeneralStatistics() error {
for sqlInstance := range c.mssqlInstances {
c.genStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create General Statistics collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create General Statistics collector for instance %s: %w", sqlInstance, err))
}
}

@@ -263,7 +265,7 @@ func (c *Collector) buildGeneralStatistics() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectGeneralStatistics(ch chan<- prometheus.Metric) error {
@@ -271,6 +273,10 @@ func (c *Collector) collectGeneralStatistics(ch chan<- prometheus.Metric) error
}

func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -52,6 +53,7 @@ func (c *Collector) buildLocks() error {
var err error

c.locksPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
locksAverageWaitTimeMS,
locksAverageWaitTimeMSBase,
@@ -66,7 +68,7 @@ func (c *Collector) buildLocks() error {
for sqlInstance := range c.mssqlInstances {
c.locksPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Locks"), perfdata.InstancesAll, counters)
if err != nil {
return fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance, err))
}
}

@@ -119,7 +121,7 @@ func (c *Collector) buildLocks() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectLocks(ch chan<- prometheus.Metric) error {
@@ -127,6 +129,10 @@ func (c *Collector) collectLocks(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Locks"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -75,6 +76,7 @@ func (c *Collector) buildMemoryManager() error {
var err error

c.memMgrPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
memMgrConnectionMemoryKB,
memMgrDatabaseCacheMemoryKB,
@@ -101,7 +103,7 @@ func (c *Collector) buildMemoryManager() error {
for sqlInstance := range c.mssqlInstances {
c.memMgrPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), perfdata.InstancesAll, counters)
if err != nil {
return fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create Memory Manager collector for instance %s: %w", sqlInstance, err))
}
}

@@ -226,7 +228,7 @@ func (c *Collector) buildMemoryManager() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectMemoryManager(ch chan<- prometheus.Metric) error {
@@ -234,6 +236,10 @@ func (c *Collector) collectMemoryManager(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -38,6 +39,7 @@ func (c *Collector) buildSQLErrors() error {
var err error

c.genStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
sqlErrorsErrorsPerSec,
}
@@ -45,7 +47,7 @@ func (c *Collector) buildSQLErrors() error {
for sqlInstance := range c.mssqlInstances {
c.genStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), perfdata.InstancesAll, counters)
if err != nil {
return fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance, err))
}
}

@@ -57,7 +59,7 @@ func (c *Collector) buildSQLErrors() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectSQLErrors(ch chan<- prometheus.Metric) error {
@@ -65,6 +67,10 @@ func (c *Collector) collectSQLErrors(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -57,6 +58,7 @@ func (c *Collector) buildSQLStats() error {
var err error

c.genStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
sqlStatsAutoParamAttemptsPerSec,
sqlStatsBatchRequestsPerSec,
@@ -74,7 +76,7 @@ func (c *Collector) buildSQLStats() error {
for sqlInstance := range c.mssqlInstances {
c.genStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create SQL Statistics collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create SQL Statistics collector for instance %s: %w", sqlInstance, err))
}
}

@@ -145,7 +147,7 @@ func (c *Collector) buildSQLStats() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectSQLStats(ch chan<- prometheus.Metric) error {
@@ -153,6 +155,10 @@ func (c *Collector) collectSQLStats(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -61,6 +62,7 @@ func (c *Collector) buildTransactions() error {
var err error

c.transactionsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
transactionsFreeSpaceintempdbKB,
transactionsLongestTransactionRunningTime,
@@ -80,7 +82,7 @@ func (c *Collector) buildTransactions() error {
for sqlInstance := range c.mssqlInstances {
c.transactionsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create Transactions collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create Transactions collector for instance %s: %w", sqlInstance, err))
}
}

@@ -163,7 +165,7 @@ func (c *Collector) buildTransactions() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectTransactions(ch chan<- prometheus.Metric) error {
@@ -173,6 +175,10 @@ func (c *Collector) collectTransactions(ch chan<- prometheus.Metric) error {
// Win32_PerfRawData_MSSQLSERVER_Transactions docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), err)

@@ -16,6 +16,7 @@
package mssql

import (
"errors"
"fmt"

"github.com/prometheus-community/windows_exporter/internal/perfdata"
@@ -59,6 +60,7 @@ func (c *Collector) buildWaitStats() error {
var err error

c.waitStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
errs := make([]error, 0, len(c.mssqlInstances))
counters := []string{
waitStatsLockWaits,
waitStatsMemoryGrantQueueWaits,
@@ -77,7 +79,7 @@ func (c *Collector) buildWaitStats() error {
for sqlInstance := range c.mssqlInstances {
c.waitStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), perfdata.InstancesAll, counters)
if err != nil {
return fmt.Errorf("failed to create Wait Statistics collector for instance %s: %w", sqlInstance, err)
errs = append(errs, fmt.Errorf("failed to create Wait Statistics collector for instance %s: %w", sqlInstance, err))
}
}

@@ -155,7 +157,7 @@ func (c *Collector) buildWaitStats() error {
nil,
)

return nil
return errors.Join(errs...)
}

func (c *Collector) collectWaitStats(ch chan<- prometheus.Metric) error {
@@ -163,6 +165,10 @@ func (c *Collector) collectWaitStats(ch chan<- prometheus.Metric) error {
}

func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
if perfDataCollector == nil {
return types.ErrCollectorNotInitialized
}

perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), err)

@@ -41,6 +41,7 @@ type Config struct {
CollectorsEnabled []string `yaml:"collectors_enabled"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
NicExclude: types.RegExpEmpty,
NicInclude: types.RegExpAny,
@@ -392,6 +393,7 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
return nil
}

//nolint:gochecknoglobals
var addressFamily = map[uint16]string{
windows.AF_INET: "ipv4",
windows.AF_INET6: "ipv6",

@@ -19,7 +19,8 @@ import (
"errors"
"fmt"
"log/slog"
"slices"
"sort"
"sync"

"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
@@ -32,6 +33,7 @@ type Config struct {
CollectorsEnabled []string `yaml:"collectors_enabled"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
CollectorsEnabled: []string{
collectorClrExceptions,
@@ -61,6 +63,8 @@ type Collector struct {
config Config
miSession *mi.Session

collectorFns []func(ch chan<- prometheus.Metric) error

// clrexceptions
numberOfExceptionsThrown *prometheus.Desc
numberOfFilters *prometheus.Desc
@@ -152,42 +156,68 @@ func (c *Collector) Close() error {
}

func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if len(c.config.CollectorsEnabled) == 0 {
return nil
}

if miSession == nil {
return errors.New("miSession is nil")
}

c.miSession = miSession

if slices.Contains(c.config.CollectorsEnabled, collectorClrExceptions) {
c.buildClrExceptions()
c.collectorFns = make([]func(ch chan<- prometheus.Metric) error, 0, len(c.config.CollectorsEnabled))

subCollectors := map[string]struct {
build func()
collect func(ch chan<- prometheus.Metric) error
close func()
}{
collectorClrExceptions: {
build: c.buildClrExceptions,
collect: c.collectClrExceptions,
},
collectorClrJIT: {
build: c.buildClrJIT,
collect: c.collectClrJIT,
},
collectorClrLoading: {
build: c.buildClrLoading,
collect: c.collectClrLoading,
},
collectorClrInterop: {
build: c.buildClrInterop,
collect: c.collectClrInterop,
},
collectorClrLocksAndThreads: {
build: c.buildClrLocksAndThreads,
collect: c.collectClrLocksAndThreads,
},
collectorClrMemory: {
build: c.buildClrMemory,
collect: c.collectClrMemory,
},
collectorClrRemoting: {
build: c.buildClrRemoting,
collect: c.collectClrRemoting,
},
collectorClrSecurity: {
build: c.buildClrSecurity,
collect: c.collectClrSecurity,
},
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrInterop) {
c.buildClrInterop()
}
// Results must be ordered to prevent test failures.
sort.Strings(c.config.CollectorsEnabled)

if slices.Contains(c.config.CollectorsEnabled, collectorClrJIT) {
c.buildClrJIT()
}
for _, name := range c.config.CollectorsEnabled {
if _, ok := subCollectors[name]; !ok {
return fmt.Errorf("unknown collector: %s", name)
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrLoading) {
c.buildClrLoading()
}
subCollectors[name].build()

if slices.Contains(c.config.CollectorsEnabled, collectorClrLocksAndThreads) {
c.buildClrLocksAndThreads()
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrMemory) {
c.buildClrMemory()
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrRemoting) {
c.buildClrRemoting()
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrSecurity) {
c.buildClrSecurity()
c.collectorFns = append(c.collectorFns, subCollectors[name].collect)
}

return nil
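
The chain of slices.Contains checks collapses into a map from sub-collector name to its build/collect pair, with the enabled list sorted first so collection order stays deterministic. A reduced sketch of the registry shape (names and types here are illustrative, not the collector's real ones):

package main

import (
	"fmt"
	"sort"
)

type subCollector struct {
	build   func()
	collect func() error
}

func register(enabled []string, registry map[string]subCollector) ([]func() error, error) {
	// Sorting keeps collection order (and therefore test output) stable.
	sort.Strings(enabled)

	fns := make([]func() error, 0, len(enabled))

	for _, name := range enabled {
		sc, ok := registry[name]
		if !ok {
			return nil, fmt.Errorf("unknown collector: %s", name)
		}

		sc.build()
		fns = append(fns, sc.collect)
	}

	return fns, nil
}

func main() {
	registry := map[string]subCollector{
		"clrexceptions": {build: func() {}, collect: func() error { return nil }},
		"clrjit":        {build: func() {}, collect: func() error { return nil }},
	}

	fns, err := register([]string{"clrjit", "clrexceptions"}, registry)
	fmt.Println(len(fns), err)
}
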
@@ -196,57 +226,29 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
var (
err error
errs []error
)
errCh := make(chan error, len(c.collectorFns))
errs := make([]error, 0, len(c.collectorFns))

if slices.Contains(c.config.CollectorsEnabled, collectorClrExceptions) {
if err = c.collectClrExceptions(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrExceptions, err))
}
wg := sync.WaitGroup{}

for _, fn := range c.collectorFns {
wg.Add(1)

go func(fn func(ch chan<- prometheus.Metric) error) {
defer wg.Done()

if err := fn(ch); err != nil {
errCh <- err
}
}(fn)
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrInterop) {
if err = c.collectClrInterop(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrInterop, err))
}
}
wg.Wait()

if slices.Contains(c.config.CollectorsEnabled, collectorClrJIT) {
if err = c.collectClrJIT(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrJIT, err))
}
}
close(errCh)

if slices.Contains(c.config.CollectorsEnabled, collectorClrLoading) {
if err = c.collectClrLoading(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrLoading, err))
}
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrLocksAndThreads) {
if err = c.collectClrLocksAndThreads(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrLocksAndThreads, err))
}
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrMemory) {
if err = c.collectClrMemory(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrMemory, err))
}
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrRemoting) {
if err = c.collectClrRemoting(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrRemoting, err))
}
}

if slices.Contains(c.config.CollectorsEnabled, collectorClrSecurity) {
if err = c.collectClrSecurity(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrSecurity, err))
}
for err := range errCh {
errs = append(errs, err)
}

return errors.Join(errs...)
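
Collect now fans the registered functions out to goroutines and funnels failures through an error channel buffered to len(collectorFns), so no sender can block; the errors are joined only after wg.Wait and close(errCh). A compact sketch of the fan-out:

package main

import (
	"errors"
	"fmt"
	"sync"
)

func collectAll(fns []func() error) error {
	errCh := make(chan error, len(fns)) // buffered: senders never block
	wg := sync.WaitGroup{}

	for _, fn := range fns {
		wg.Add(1)

		go func(fn func() error) {
			defer wg.Done()

			if err := fn(); err != nil {
				errCh <- err
			}
		}(fn)
	}

	wg.Wait()
	close(errCh)

	errs := make([]error, 0, len(fns))
	for err := range errCh {
		errs = append(errs, err)
	}

	return errors.Join(errs...)
}

func main() {
	err := collectAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("clrmemory: query failed") },
	})
	fmt.Println(err)
}
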

@@ -31,6 +31,7 @@ const Name = "nps"

type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

type Collector struct {
@@ -313,7 +314,7 @@ func (c *Collector) collectAccept(ch chan<- prometheus.Metric) error {

data, ok := perfData[perfdata.InstanceEmpty]
if !ok {
return errors.New("perflib query for NPS Authentication Server returned empty result set")
return fmt.Errorf("failed to collect NPS Authentication Server metrics: %w", types.ErrNoData)
}

ch <- prometheus.MustNewConstMetric(
@@ -405,7 +406,7 @@ func (c *Collector) collectAccounting(ch chan<- prometheus.Metric) error {

data, ok := perfData[perfdata.InstanceEmpty]
if !ok {
return errors.New("perflib query for NPS Accounting Server returned empty result set")
return fmt.Errorf("failed to collect NPS Accounting Server metrics: %w", types.ErrNoData)
}

ch <- prometheus.MustNewConstMetric(

@@ -37,6 +37,7 @@ const Name = "os"

type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI metrics.
@@ -109,26 +110,23 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
slog.String("collector", Name),
)

workstationInfo, err := netapi32.GetWorkstationInfo()
if err != nil {
return fmt.Errorf("failed to get workstation info: %w", err)
}

productName, buildNumber, revision, err := c.getWindowsVersion()
productName, revision, err := c.getWindowsVersion()
if err != nil {
return fmt.Errorf("failed to get Windows version: %w", err)
}

version := windows.RtlGetVersion()

c.osInformation = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
`Contains full product name & version in labels. Note that the "major_version" for Windows 11 is \"10\"; a build number greater than 22000 represents Windows 11.`,
nil,
prometheus.Labels{
"product": productName,
"version": fmt.Sprintf("%d.%d.%s", workstationInfo.VersionMajor, workstationInfo.VersionMinor, buildNumber),
"major_version": strconv.FormatUint(uint64(workstationInfo.VersionMajor), 10),
"minor_version": strconv.FormatUint(uint64(workstationInfo.VersionMinor), 10),
"build_number": buildNumber,
"version": fmt.Sprintf("%d.%d.%d", version.MajorVersion, version.MinorVersion, version.BuildNumber),
"major_version": strconv.FormatUint(uint64(version.MajorVersion), 10),
"minor_version": strconv.FormatUint(uint64(version.MinorVersion), 10),
"build_number": strconv.FormatUint(uint64(version.BuildNumber), 10),
"revision": revision,
},
)
@@ -352,31 +350,26 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) {
)
}

func (c *Collector) getWindowsVersion() (string, string, string, error) {
func (c *Collector) getWindowsVersion() (string, string, error) {
// Get build number and product name from registry
ntKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
if err != nil {
return "", "", "", fmt.Errorf("failed to open registry key: %w", err)
return "", "", fmt.Errorf("failed to open registry key: %w", err)
}

defer ntKey.Close()

productName, _, err := ntKey.GetStringValue("ProductName")
if err != nil {
return "", "", "", err
}

buildNumber, _, err := ntKey.GetStringValue("CurrentBuildNumber")
if err != nil {
return "", "", "", err
return "", "", err
}

revision, _, err := ntKey.GetIntegerValue("UBR")
if errors.Is(err, registry.ErrNotExist) {
revision = 0
} else if err != nil {
return "", "", "", err
return "", "", err
}

return productName, buildNumber, strconv.FormatUint(revision, 10), nil
return productName, strconv.FormatUint(revision, 10), nil
}
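
windows.RtlGetVersion reads the version directly from the kernel, replacing the registry's CurrentBuildNumber string and the workstation-info call; the registry is still consulted for ProductName and the UBR revision. A sketch of assembling the same label set (requires golang.org/x/sys/windows and a Windows build target):

//go:build windows

package main

import (
	"fmt"
	"strconv"

	"golang.org/x/sys/windows"
)

func main() {
	v := windows.RtlGetVersion()

	// Mirrors the label layout built for the os info metric above.
	labels := map[string]string{
		"version":       fmt.Sprintf("%d.%d.%d", v.MajorVersion, v.MinorVersion, v.BuildNumber),
		"major_version": strconv.FormatUint(uint64(v.MajorVersion), 10),
		"minor_version": strconv.FormatUint(uint64(v.MinorVersion), 10),
		"build_number":  strconv.FormatUint(uint64(v.BuildNumber), 10),
	}

	fmt.Println(labels)
}
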

@@ -33,6 +33,7 @@ const Name = "pagefile"

type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI metrics.

@@ -36,6 +36,7 @@ type Config struct {
Objects []Object `yaml:"objects"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
Objects: make([]Object, 0),
}

@@ -35,6 +35,7 @@ type Config struct {
DiskExclude *regexp.Regexp `yaml:"disk_exclude"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
DiskInclude: types.RegExpAny,
DiskExclude: types.RegExpEmpty,

@@ -31,6 +31,8 @@ import (
const Name = "printer"

// printerStatusMap source: https://learn.microsoft.com/en-us/windows/win32/cimwin32prov/win32-printer#:~:text=Power%20Save-,PrinterStatus,Offline%20(7),-PrintJobDataType
//
//nolint:gochecknoglobals
var printerStatusMap = map[uint16]string{
1: "Other",
2: "Unknown",
@@ -46,6 +48,7 @@ type Config struct {
PrinterExclude *regexp.Regexp `yaml:"printer_exclude"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
PrinterInclude: types.RegExpAny,
PrinterExclude: types.RegExpEmpty,

@@ -20,8 +20,10 @@ import (
"fmt"
"log/slog"
"regexp"
"runtime/debug"
"strconv"
"strings"
"sync"
"unsafe"

"github.com/alecthomas/kingpin/v2"
@@ -40,6 +42,7 @@ type Config struct {
EnableWorkerProcess bool `yaml:"enable_iis_worker_process"` //nolint:tagliatelle
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
ProcessInclude: types.RegExpAny,
ProcessExclude: types.RegExpEmpty,
@@ -56,7 +59,10 @@ type Collector struct {

perfDataCollector *perfdata.Collector

lookupCache map[string]string
lookupCache sync.Map

workerCh chan processWorkerRequest
mu sync.RWMutex

info *prometheus.Desc
cpuTimeTotal *prometheus.Desc
@@ -76,6 +82,14 @@ type Collector struct {
workingSetPrivate *prometheus.Desc
}

type processWorkerRequest struct {
ch chan<- prometheus.Metric
name string
performanceCounterValues map[string]perfdata.CounterValue
waitGroup *sync.WaitGroup
workerProcesses []WorkerProcess
}

func New(config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
@@ -115,7 +129,7 @@ func NewWithFlags(app *kingpin.Application) *Collector {

app.Flag(
"collector.process.iis",
"Enable IIS worker process name queries. May cause the collector to leak memory.",
"Enable IIS collectWorker process name queries. May cause the collector to leak memory.",
).Default(strconv.FormatBool(c.config.EnableWorkerProcess)).BoolVar(&c.config.EnableWorkerProcess)

app.Action(func(*kingpin.ParseContext) error {
@@ -142,8 +156,13 @@ func (c *Collector) GetName() string {
}

func (c *Collector) Close() error {
c.mu.Lock()
defer c.mu.Unlock()

c.perfDataCollector.Close()

close(c.workerCh)

return nil
}

@@ -204,6 +223,14 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
return fmt.Errorf("failed to create Process collector: %w", err)
}

c.workerCh = make(chan processWorkerRequest, 32)
c.mu = sync.RWMutex{}
c.lookupCache = sync.Map{}

for range 4 {
go c.collectWorker()
}

if c.config.ProcessInclude.String() == "^(?:.*)$" && c.config.ProcessExclude.String() == "^(?:)$" {
logger.Warn("No filters specified for process collector. This will generate a very large number of metrics!")
}
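
Per-process metric emission moves onto a fixed pool of four workers fed through a buffered channel, and a panicking worker logs its stack and respawns itself so one bad process cannot shrink the pool. A trimmed, self-contained sketch of that startup and recover-and-respawn loop (types here are illustrative, not the collector's own):

package main

import (
	"fmt"
	"runtime/debug"
	"sync"
)

type request struct {
	name string
	wg   *sync.WaitGroup
}

type pool struct{ ch chan request }

func (p *pool) worker() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("worker panic:", r)
			fmt.Println(string(debug.Stack()))

			// Respawn so the pool keeps its fixed size.
			go p.worker()
		}
	}()

	for req := range p.ch {
		func() {
			defer req.wg.Done()

			fmt.Println("collected", req.name)
		}()
	}
}

func main() {
	p := &pool{ch: make(chan request, 32)} // buffered, like workerCh

	for range 4 {
		go p.worker()
	}

	wg := &sync.WaitGroup{}

	for _, name := range []string{"explorer", "svchost"} {
		wg.Add(1)
		p.ch <- request{name: name, wg: wg}
	}

	wg.Wait()
	close(p.ch)
}
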
|
||||
@@ -306,8 +333,6 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
|
||||
nil,
|
)

c.lookupCache = make(map[string]string)

return nil
}

@@ -333,6 +358,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
}

wg := &sync.WaitGroup{}

for name, process := range perfData {
// Duplicate processes are suffixed #, and an index number. Remove those.
name, _, _ = strings.Cut(name, "#")

@@ -341,194 +368,230 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
continue
}

var pid uint64
wg.Add(1)

if v, ok := process[processID]; ok {
pid = uint64(v.FirstValue)
} else if v, ok = process[idProcess]; ok {
pid = uint64(v.FirstValue)
c.workerCh <- processWorkerRequest{
ch: ch,
name: name,
performanceCounterValues: process,
workerProcesses: workerProcesses,
waitGroup: wg,
}

parentPID := strconv.FormatUint(uint64(process[creatingProcessID].FirstValue), 10)

if c.config.EnableWorkerProcess {
for _, wp := range workerProcesses {
if wp.ProcessId == pid {
name = strings.Join([]string{name, wp.AppPoolName}, "_")

break
}
}
}

cmdLine, processOwner, processGroupID, err := c.getProcessInformation(uint32(pid))
if err != nil {
c.logger.Debug("Failed to get process information",
slog.Uint64("pid", pid),
slog.Any("err", err),
)
}

pidString := strconv.FormatUint(pid, 10)

ch <- prometheus.MustNewConstMetric(
c.info,
prometheus.GaugeValue,
1.0,
name, pidString, parentPID, strconv.Itoa(int(processGroupID)), processOwner, cmdLine,
)

ch <- prometheus.MustNewConstMetric(
c.startTime,
prometheus.GaugeValue,
process[elapsedTime].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.handleCount,
prometheus.GaugeValue,
process[handleCount].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.cpuTimeTotal,
prometheus.CounterValue,
process[percentPrivilegedTime].FirstValue,
name, pidString, "privileged",
)

ch <- prometheus.MustNewConstMetric(
c.cpuTimeTotal,
prometheus.CounterValue,
process[percentUserTime].FirstValue,
name, pidString, "user",
)

ch <- prometheus.MustNewConstMetric(
c.ioBytesTotal,
prometheus.CounterValue,
process[ioOtherBytesPerSec].FirstValue,
name, pidString, "other",
)

ch <- prometheus.MustNewConstMetric(
c.ioOperationsTotal,
prometheus.CounterValue,
process[ioOtherOperationsPerSec].FirstValue,
name, pidString, "other",
)

ch <- prometheus.MustNewConstMetric(
c.ioBytesTotal,
prometheus.CounterValue,
process[ioReadBytesPerSec].FirstValue,
name, pidString, "read",
)

ch <- prometheus.MustNewConstMetric(
c.ioOperationsTotal,
prometheus.CounterValue,
process[ioReadOperationsPerSec].FirstValue,
name, pidString, "read",
)

ch <- prometheus.MustNewConstMetric(
c.ioBytesTotal,
prometheus.CounterValue,
process[ioWriteBytesPerSec].FirstValue,
name, pidString, "write",
)

ch <- prometheus.MustNewConstMetric(
c.ioOperationsTotal,
prometheus.CounterValue,
process[ioWriteOperationsPerSec].FirstValue,
name, pidString, "write",
)

ch <- prometheus.MustNewConstMetric(
c.pageFaultsTotal,
prometheus.CounterValue,
process[pageFaultsPerSec].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.pageFileBytes,
prometheus.GaugeValue,
process[pageFileBytes].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.poolBytes,
prometheus.GaugeValue,
process[poolNonPagedBytes].FirstValue,
name, pidString, "nonpaged",
)

ch <- prometheus.MustNewConstMetric(
c.poolBytes,
prometheus.GaugeValue,
process[poolPagedBytes].FirstValue,
name, pidString, "paged",
)

ch <- prometheus.MustNewConstMetric(
c.priorityBase,
prometheus.GaugeValue,
process[priorityBase].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.privateBytes,
prometheus.GaugeValue,
process[privateBytes].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.threadCount,
prometheus.GaugeValue,
process[threadCount].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.virtualBytes,
prometheus.GaugeValue,
process[virtualBytes].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.workingSetPrivate,
prometheus.GaugeValue,
process[workingSetPrivate].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.workingSetPeak,
prometheus.GaugeValue,
process[workingSetPeak].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.workingSet,
prometheus.GaugeValue,
process[workingSet].FirstValue,
name, pidString,
)
}

wg.Wait()

return nil
}
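
Note: the rewritten Collect above no longer emits metrics inline; it fans each process out to long-lived workers over c.workerCh and blocks on the WaitGroup until every request is handled. A minimal, self-contained sketch of that dispatch pattern, with illustrative names (workItem, workerCh) rather than the exporter's own:

package main

import (
	"fmt"
	"sync"
)

type workItem struct {
	name string
	wg   *sync.WaitGroup
}

func main() {
	workerCh := make(chan workItem, 32)

	// Long-lived worker, started once (the exporter starts several).
	go func() {
		for item := range workerCh {
			fmt.Println("processed", item.name) // stand-in for metric emission
			item.wg.Done()
		}
	}()

	wg := &sync.WaitGroup{}
	for _, name := range []string{"explorer", "svchost"} {
		wg.Add(1)
		workerCh <- workItem{name: name, wg: wg}
	}

	wg.Wait() // Collect returns only after every item is handled
	close(workerCh)
}

Carrying the WaitGroup inside each request is what lets the sender know when all work is done, while the buffered channel keeps the dispatch loop from stalling on a busy worker.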
func (c *Collector) collectWorker() {
defer func() {
if r := recover(); r != nil {
c.logger.Error("Worker panic",
slog.Any("panic", r),
slog.String("stack", string(debug.Stack())),
)

// Restart the collectWorker
go c.collectWorker()
}
}()

for req := range c.workerCh {
(func() {
defer req.waitGroup.Done()

ch := req.ch
name := req.name
process := req.performanceCounterValues

var pid uint64

if v, ok := process[processID]; ok {
pid = uint64(v.FirstValue)
} else if v, ok = process[idProcess]; ok {
pid = uint64(v.FirstValue)
}

parentPID := strconv.FormatUint(uint64(process[creatingProcessID].FirstValue), 10)

if c.config.EnableWorkerProcess {
for _, wp := range req.workerProcesses {
if wp.ProcessId == pid {
name = strings.Join([]string{name, wp.AppPoolName}, "_")

break
}
}
}

cmdLine, processOwner, processGroupID, err := c.getProcessInformation(uint32(pid))
if err != nil {
c.logger.Debug("Failed to get process information",
slog.Uint64("pid", pid),
slog.Any("err", err),
)
}

pidString := strconv.FormatUint(pid, 10)

ch <- prometheus.MustNewConstMetric(
c.info,
prometheus.GaugeValue,
1.0,
name, pidString, parentPID, strconv.Itoa(int(processGroupID)), processOwner, cmdLine,
)

ch <- prometheus.MustNewConstMetric(
c.startTime,
prometheus.GaugeValue,
process[elapsedTime].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.handleCount,
prometheus.GaugeValue,
process[handleCount].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.cpuTimeTotal,
prometheus.CounterValue,
process[percentPrivilegedTime].FirstValue,
name, pidString, "privileged",
)

ch <- prometheus.MustNewConstMetric(
c.cpuTimeTotal,
prometheus.CounterValue,
process[percentUserTime].FirstValue,
name, pidString, "user",
)

ch <- prometheus.MustNewConstMetric(
c.ioBytesTotal,
prometheus.CounterValue,
process[ioOtherBytesPerSec].FirstValue,
name, pidString, "other",
)

ch <- prometheus.MustNewConstMetric(
c.ioOperationsTotal,
prometheus.CounterValue,
process[ioOtherOperationsPerSec].FirstValue,
name, pidString, "other",
)

ch <- prometheus.MustNewConstMetric(
c.ioBytesTotal,
prometheus.CounterValue,
process[ioReadBytesPerSec].FirstValue,
name, pidString, "read",
)

ch <- prometheus.MustNewConstMetric(
c.ioOperationsTotal,
prometheus.CounterValue,
process[ioReadOperationsPerSec].FirstValue,
name, pidString, "read",
)

ch <- prometheus.MustNewConstMetric(
c.ioBytesTotal,
prometheus.CounterValue,
process[ioWriteBytesPerSec].FirstValue,
name, pidString, "write",
)

ch <- prometheus.MustNewConstMetric(
c.ioOperationsTotal,
prometheus.CounterValue,
process[ioWriteOperationsPerSec].FirstValue,
name, pidString, "write",
)

ch <- prometheus.MustNewConstMetric(
c.pageFaultsTotal,
prometheus.CounterValue,
process[pageFaultsPerSec].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.pageFileBytes,
prometheus.GaugeValue,
process[pageFileBytes].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.poolBytes,
prometheus.GaugeValue,
process[poolNonPagedBytes].FirstValue,
name, pidString, "nonpaged",
)

ch <- prometheus.MustNewConstMetric(
c.poolBytes,
prometheus.GaugeValue,
process[poolPagedBytes].FirstValue,
name, pidString, "paged",
)

ch <- prometheus.MustNewConstMetric(
c.priorityBase,
prometheus.GaugeValue,
process[priorityBase].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.privateBytes,
prometheus.GaugeValue,
process[privateBytes].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.threadCount,
prometheus.GaugeValue,
process[threadCount].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.virtualBytes,
prometheus.GaugeValue,
process[virtualBytes].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.workingSetPrivate,
prometheus.GaugeValue,
process[workingSetPrivate].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.workingSetPeak,
prometheus.GaugeValue,
process[workingSetPeak].FirstValue,
name, pidString,
)

ch <- prometheus.MustNewConstMetric(
c.workingSet,
prometheus.GaugeValue,
process[workingSet].FirstValue,
name, pidString,
)
})()
}
}
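
Note: the recover/restart idiom above keeps one panicking request from killing the worker pool for good. A stripped-down sketch of the same idiom, assuming a package-level jobs channel (illustrative, not the exporter's):

package main

import (
	"log/slog"
	"runtime/debug"
)

var jobs = make(chan int)

func worker() {
	defer func() {
		if r := recover(); r != nil {
			slog.Error("worker panic",
				slog.Any("panic", r),
				slog.String("stack", string(debug.Stack())),
			)

			go worker() // restart; the range loop resumes on the new goroutine
		}
	}()

	for j := range jobs {
		if j < 0 {
			panic("negative job") // simulated failure
		}
	}
}

func main() {
	go worker()
	jobs <- 1
	jobs <- -1 // panics inside the worker, which logs and respawns itself
	jobs <- 2  // handled by the restarted worker
	close(jobs)
}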
// ref: https://github.com/microsoft/hcsshim/blob/8beabacfc2d21767a07c20f8dd5f9f3932dbf305/internal/uvm/stats.go#L25
func (c *Collector) getProcessInformation(pid uint32) (string, string, uint32, error) {
if pid == 0 {

@@ -646,7 +709,14 @@ func (c *Collector) getProcessOwner(logger *slog.Logger, hProcess windows.Handle
sid := tokenUser.User.Sid.String()

owner, ok := c.lookupCache[sid]
var owner string

ownerVal, ok := c.lookupCache.Load(sid)
if ok {
owner, ok = ownerVal.(string)
}

if !ok {
account, domain, _, err := tokenUser.User.Sid.LookupAccount("")
if err != nil {

@@ -655,7 +725,7 @@ func (c *Collector) getProcessOwner(logger *slog.Logger, hProcess windows.Handle
owner = fmt.Sprintf(`%s\%s`, account, domain)
}

c.lookupCache[sid] = owner
c.lookupCache.Store(sid, owner)
}
return owner, nil
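
Note: the hunk above swaps the plain map for a sync.Map so the concurrent workers can share the SID-to-owner cache without a separate mutex. A small sketch of the Load/type-assert/Store sequence; lookupSID is a hypothetical stand-in for tokenUser.User.Sid.LookupAccount:

package main

import (
	"fmt"
	"sync"
)

var lookupCache sync.Map

func lookupSID(sid string) string { return "DOMAIN\\user-for-" + sid } // placeholder

func ownerForSID(sid string) string {
	if v, ok := lookupCache.Load(sid); ok {
		if owner, ok := v.(string); ok { // values come back untyped; assert before use
			return owner
		}
	}

	owner := lookupSID(sid) // slow path: one real lookup per SID
	lookupCache.Store(sid, owner)

	return owner
}

func main() {
	fmt.Println(ownerForSID("S-1-5-18"))
	fmt.Println(ownerForSID("S-1-5-18")) // served from the cache
}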

@@ -33,6 +33,7 @@ const Name = "remote_fx"
type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// Collector

@@ -20,24 +20,30 @@ import (
"fmt"
"log/slog"
"regexp"
"runtime"
"strings"
"sync"

"github.com/alecthomas/kingpin/v2"
"github.com/go-ole/go-ole"
"github.com/go-ole/go-ole/oleutil"
"github.com/prometheus-community/windows_exporter/internal/headers/schedule_service"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)

const Name = "scheduled_task"
const (
Name = "scheduled_task"

workerCount = 4
)

type Config struct {
TaskExclude *regexp.Regexp `yaml:"task_exclude"`
TaskInclude *regexp.Regexp `yaml:"task_include"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
TaskExclude: types.RegExpEmpty,
TaskInclude: types.RegExpAny,

@@ -46,8 +52,11 @@ var ConfigDefaults = Config{
type Collector struct {
config Config

scheduledTasksReqCh chan struct{}
scheduledTasksCh chan *scheduledTaskResults
logger *slog.Logger

scheduledTasksReqCh chan struct{}
scheduledTasksWorker chan scheduledTaskWorkerRequest
scheduledTasksCh chan scheduledTaskResults

lastResult *prometheus.Desc
missedRuns *prometheus.Desc

@@ -73,6 +82,7 @@ const (
SCHED_S_TASK_HAS_NOT_RUN TaskResult = 0x00041303
)

//nolint:gochecknoglobals
var taskStates = []string{"disabled", "queued", "ready", "running", "unknown"}

type scheduledTask struct {

@@ -85,8 +95,13 @@ type scheduledTask struct {
}

type scheduledTaskResults struct {
scheduledTasks []scheduledTask
err error
tasks []scheduledTask
err error
}

type scheduledTaskWorkerRequest struct {
folderPath string
results chan<- scheduledTaskResults
}

func New(config *Config) *Collector {

@@ -157,10 +172,13 @@ func (c *Collector) Close() error {
return nil
}

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))

initErrCh := make(chan error)
c.scheduledTasksReqCh = make(chan struct{})
c.scheduledTasksCh = make(chan *scheduledTaskResults)
c.scheduledTasksCh = make(chan scheduledTaskResults)
c.scheduledTasksWorker = make(chan scheduledTaskWorkerRequest, 100)

go c.initializeScheduleService(initErrCh)

@@ -256,68 +274,41 @@ func (c *Collector) getScheduledTasks() ([]scheduledTask, error) {
return []scheduledTask{}, nil
}

if scheduledTasks == nil {
return nil, errors.New("scheduled tasks channel is nil")
}

if scheduledTasks.err != nil {
return nil, scheduledTasks.err
}

return scheduledTasks.scheduledTasks, scheduledTasks.err
return scheduledTasks.tasks, scheduledTasks.err
}
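
Note: getScheduledTasks above is a request/response round trip over two channels: it signals on scheduledTasksReqCh and blocks on scheduledTasksCh for the result struct. A self-contained sketch of that handshake with illustrative names:

package main

import "fmt"

type result struct {
	tasks []string
	err   error
}

func main() {
	reqCh := make(chan struct{})
	resCh := make(chan result)

	// Background service loop (stands in for initializeScheduleService).
	go func() {
		for range reqCh {
			resCh <- result{tasks: []string{`\Microsoft\Windows\Defrag\ScheduledDefrag`}}
		}
	}()

	reqCh <- struct{}{} // ask for a fresh snapshot
	res := <-resCh      // block until the service loop answers

	fmt.Println(res.tasks, res.err)
}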
func (c *Collector) initializeScheduleService(initErrCh chan<- error) {
// The only way to run WMI queries in parallel while being thread-safe is to
// ensure the CoInitialize[Ex]() call is bound to its current OS thread.
// Otherwise, attempting to initialize and run parallel queries across
// goroutines will result in protected memory errors.
runtime.LockOSThread()
defer runtime.UnlockOSThread()
service := schedule_service.New()
if err := service.Connect(); err != nil {
initErrCh <- fmt.Errorf("failed to connect to schedule service: %w", err)

if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
var oleCode *ole.OleError
if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != 0x00000001 {
initErrCh <- err
return
}

return
defer service.Close()

errs := make([]error, 0, workerCount)

for range workerCount {
errCh := make(chan error, workerCount)

go c.collectWorker(errCh)

if err := <-errCh; err != nil {
errs = append(errs, err)
}
}

defer ole.CoUninitialize()

scheduleClassID, err := ole.ClassIDFrom("Schedule.Service.1")
if err != nil {
if err := errors.Join(errs...); err != nil {
initErrCh <- err

return
}

taskSchedulerObj, err := ole.CreateInstance(scheduleClassID, nil)
if err != nil || taskSchedulerObj == nil {
initErrCh <- err

return
}
defer taskSchedulerObj.Release()

taskServiceObj := taskSchedulerObj.MustQueryInterface(ole.IID_IDispatch)
defer taskServiceObj.Release()

taskService, err := oleutil.CallMethod(taskServiceObj, "Connect")
if err != nil {
initErrCh <- err

return
}

defer func(taskService *ole.VARIANT) {
_ = taskService.Clear()
}(taskService)

close(initErrCh)

scheduledTasks := make([]scheduledTask, 0, 100)
taskServiceObj := service.GetOLETaskServiceObj()
scheduledTasks := make([]scheduledTask, 0, 500)

for range c.scheduledTasksReqCh {
func() {

@@ -327,30 +318,102 @@ func (c *Collector) initializeScheduleService(initErrCh chan<- error) {
res, err := oleutil.CallMethod(taskServiceObj, "GetFolder", `\`)
if err != nil {
c.scheduledTasksCh <- &scheduledTaskResults{err: err}

return
c.scheduledTasksCh <- scheduledTaskResults{err: err}
}

rootFolderObj := res.ToIDispatch()
defer rootFolderObj.Release()

err = fetchTasksRecursively(rootFolderObj, &scheduledTasks)
errs := make([]error, 0)
scheduledTasksWorkerResults := make(chan scheduledTaskResults)

c.scheduledTasksCh <- &scheduledTaskResults{scheduledTasks: scheduledTasks, err: err}
wg := &sync.WaitGroup{}

go func() {
for workerResults := range scheduledTasksWorkerResults {
wg.Done()

if workerResults.err != nil {
errs = append(errs, workerResults.err)
}

if workerResults.tasks != nil {
errs = append(errs, workerResults.err)

scheduledTasks = append(scheduledTasks, workerResults.tasks...)
}
}
}()

if err := c.fetchRecursively(rootFolderObj, wg, scheduledTasksWorkerResults); err != nil {
errs = append(errs, err)
}

wg.Wait()

close(scheduledTasksWorkerResults)

c.scheduledTasksCh <- scheduledTaskResults{tasks: scheduledTasks, err: errors.Join(errs...)}
}()
}

close(c.scheduledTasksCh)
close(c.scheduledTasksWorker)

c.scheduledTasksCh = nil
c.scheduledTasksWorker = nil
}
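
Note: initializeScheduleService now starts workerCount workers, reads one startup error (or a nil receive from a closed channel) per worker, and surfaces the failures as a single joined error, which fits the commit's tolerate-failures theme. A runnable sketch of that startup pattern; startWorker is a hypothetical stand-in for collectWorker:

package main

import (
	"errors"
	"fmt"
)

const workerCount = 4

func startWorker(id int, errCh chan<- error) {
	if id == 2 {
		errCh <- fmt.Errorf("worker %d: failed to connect", id) // simulated startup failure

		return
	}

	close(errCh) // closing yields a nil receive: successful startup
}

func main() {
	errs := make([]error, 0, workerCount)

	for i := range workerCount {
		errCh := make(chan error, 1)
		go startWorker(i, errCh)

		if err := <-errCh; err != nil {
			errs = append(errs, err)
		}
	}

	if err := errors.Join(errs...); err != nil {
		fmt.Println("init failed:", err) // one worker failing no longer aborts the rest
	}
}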
func fetchTasksRecursively(folder *ole.IDispatch, scheduledTasks *[]scheduledTask) error {
if err := fetchTasksInFolder(folder, scheduledTasks); err != nil {
return err
func (c *Collector) collectWorker(errCh chan<- error) {
defer func() {
if r := recover(); r != nil {
c.logger.Error("worker panic",
slog.Any("panic", r),
)

errCh := make(chan error, 1)
// Restart the collectWorker
go c.collectWorker(errCh)

if err := <-errCh; err != nil {
c.logger.Error("failed to restart worker",
slog.Any("err", err),
)
}
}
}()

service := schedule_service.New()
if err := service.Connect(); err != nil {
errCh <- fmt.Errorf("failed to connect to schedule service: %w", err)

return
}

close(errCh)

defer service.Close()

taskServiceObj := service.GetOLETaskServiceObj()

for task := range c.scheduledTasksWorker {
scheduledTasks, err := fetchTasksInFolder(taskServiceObj, task.folderPath)

task.results <- scheduledTaskResults{tasks: scheduledTasks, err: err}
}
}
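
Note: each worker above opens its own connection to the Task Scheduler service because, per the CoInitialize[Ex] comment earlier in this diff, COM initialization is per OS thread. A sketch of that thread-affinity rule; it builds only on Windows with github.com/go-ole/go-ole, and real code would inspect the *ole.OleError code instead of returning silently:

package main

import (
	"runtime"

	"github.com/go-ole/go-ole"
)

func comWorker(jobs <-chan string) {
	runtime.LockOSThread() // the goroutine must stay on this one OS thread
	defer runtime.UnlockOSThread()

	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		return // hedged: real code distinguishes benign S_OK/S_FALSE codes
	}
	defer ole.CoUninitialize()

	for range jobs {
		// per-folder Task Scheduler queries would run here, on this thread
	}
}

func main() {
	jobs := make(chan string, 1)
	go comWorker(jobs)
	jobs <- `\`
	close(jobs)
}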
func (c *Collector) fetchRecursively(folder *ole.IDispatch, wg *sync.WaitGroup, results chan<- scheduledTaskResults) error {
folderPathVariant, err := oleutil.GetProperty(folder, "Path")
if err != nil {
return fmt.Errorf("failed to get folder path: %w", err)
}

folderPath := folderPathVariant.ToString()

wg.Add(1)
c.scheduledTasksWorker <- scheduledTaskWorkerRequest{folderPath: folderPath, results: results}

res, err := oleutil.CallMethod(folder, "GetFolders", 1)
if err != nil {
return err

@@ -359,25 +422,41 @@ func fetchTasksRecursively(folder *ole.IDispatch, scheduledTas
subFolders := res.ToIDispatch()
defer subFolders.Release()

err = oleutil.ForEach(subFolders, func(v *ole.VARIANT) error {
return oleutil.ForEach(subFolders, func(v *ole.VARIANT) error {
subFolder := v.ToIDispatch()
defer subFolder.Release()

return fetchTasksRecursively(subFolder, scheduledTasks)
return c.fetchRecursively(subFolder, wg, results)
})

return err
}

func fetchTasksInFolder(folder *ole.IDispatch, scheduledTasks *[]scheduledTask) error {
res, err := oleutil.CallMethod(folder, "GetTasks", 1)
func fetchTasksInFolder(taskServiceObj *ole.IDispatch, folderPath string) ([]scheduledTask, error) {
folderObjRes, err := oleutil.CallMethod(taskServiceObj, "GetFolder", folderPath)
if err != nil {
return err
return nil, fmt.Errorf("failed to get folder %s: %w", folderPath, err)
}

tasks := res.ToIDispatch()
folderObj := folderObjRes.ToIDispatch()
defer folderObj.Release()

tasksRes, err := oleutil.CallMethod(folderObj, "GetTasks", 1)
if err != nil {
return nil, fmt.Errorf("failed to get tasks in folder %s: %w", folderPath, err)
}

tasks := tasksRes.ToIDispatch()
defer tasks.Release()

// Get task count
countVariant, err := oleutil.GetProperty(tasks, "Count")
if err != nil {
return nil, fmt.Errorf("failed to get task count: %w", err)
}

taskCount := int(countVariant.Val)

scheduledTasks := make([]scheduledTask, 0, taskCount)

err = oleutil.ForEach(tasks, func(v *ole.VARIANT) error {
task := v.ToIDispatch()
defer task.Release()

@@ -387,12 +466,15 @@ func fetchTasksInFolder(folder *ole.IDispatch, scheduledTasks
return err
}

*scheduledTasks = append(*scheduledTasks, parsedTask)
scheduledTasks = append(scheduledTasks, parsedTask)

return nil
})
if err != nil {
return nil, fmt.Errorf("failed to iterate over tasks: %w", err)
}

return err
return scheduledTasks, nil
}

func parseTask(task *ole.IDispatch) (scheduledTask, error) {

@@ -39,6 +39,7 @@ type Config struct {
ServiceExclude *regexp.Regexp `yaml:"service_exclude"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
ServiceInclude: types.RegExpAny,
ServiceExclude: types.RegExpEmpty,

@@ -30,6 +30,7 @@ const Name = "smb"
type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

type Collector struct {

@@ -133,7 +134,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
c.filesOpened = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_shares_filed_opened_count_total"),
prometheus.BuildFQName(types.Namespace, Name, "server_shares_files_opened_count_total"),
"Files opened on the SMB Server Share",
[]string{"share"},
nil,

@@ -33,6 +33,7 @@ const (
type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

type Collector struct {

@@ -34,6 +34,7 @@ type Config struct {
ServerExclude *regexp.Regexp `yaml:"server_exclude"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
ServerInclude: types.RegExpAny,
ServerExclude: types.RegExpEmpty,

@@ -31,6 +31,7 @@ const Name = "system"
type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI metrics.

@@ -37,6 +37,7 @@ type Config struct {
CollectorsEnabled []string `yaml:"collectors_enabled"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
CollectorsEnabled: []string{
"metrics",

@@ -248,7 +249,7 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
return nil
}

func (c *Collector) writeTCPCounters(ch chan<- prometheus.Metric, metrics map[string]perfdata.CounterValues, labels []string) {
func (c *Collector) writeTCPCounters(ch chan<- prometheus.Metric, metrics map[string]perfdata.CounterValue, labels []string) {
ch <- prometheus.MustNewConstMetric(
c.connectionFailures,
prometheus.CounterValue,

@@ -39,6 +39,7 @@ const (
type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

type Win32_ServerFeature struct {

@@ -42,6 +42,7 @@ type Config struct {
TextFileDirectories []string `yaml:"text_file_directories"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
TextFileDirectories: []string{getDefaultPath()},
}

@@ -29,6 +29,7 @@ import (
"github.com/stretchr/testify/require"
)

//nolint:gochecknoglobals
var baseDir = "../../../tools/textfile-test"

//nolint:paralleltest

20 internal/collector/thermalzone/const.go Normal file

@@ -0,0 +1,20 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package thermalzone

const (
highPrecisionTemperature = "High Precision Temperature"
percentPassiveLimit = "% Passive Limit"
throttleReasons = "Throttle Reasons"
)

@@ -16,12 +16,12 @@
package thermalzone

import (
"errors"
"fmt"
"log/slog"

"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)

@@ -30,13 +30,14 @@ const Name = "thermalzone"
type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics.
type Collector struct {
config Config
miSession *mi.Session
miQuery mi.Query
config Config

perfDataCollector *perfdata.Collector

percentPassiveLimit *prometheus.Desc
temperature *prometheus.Desc

@@ -67,19 +68,18 @@ func (c *Collector) Close() error {
return nil
}

func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
if miSession == nil {
return errors.New("miSession is nil")
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error

miQuery, err := mi.NewQuery("SELECT Name, HighPrecisionTemperature, PercentPassiveLimit, ThrottleReasons FROM Win32_PerfRawData_Counters_ThermalZoneInformation")
c.perfDataCollector, err = perfdata.NewCollector("Thermal Zone Information", perfdata.InstancesAll, []string{
highPrecisionTemperature,
percentPassiveLimit,
throttleReasons,
})
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err)
}

c.miQuery = miQuery
c.miSession = miSession

c.temperature = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "temperature_celsius"),
"(Temperature)",

@@ -111,53 +111,32 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
return fmt.Errorf("failed collecting thermalzone metrics: %w", err)
perfData, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect Thermal Zone Information metrics: %w", err)
}

return nil
}

// Win32_PerfRawData_Counters_ThermalZoneInformation docs:
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_thermalzoneinformation/
type Win32_PerfRawData_Counters_ThermalZoneInformation struct {
Name string `mi:"Name"`
HighPrecisionTemperature uint32 `mi:"HighPrecisionTemperature"`
PercentPassiveLimit uint32 `mi:"PercentPassiveLimit"`
ThrottleReasons uint32 `mi:"ThrottleReasons"`
}

func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_Counters_ThermalZoneInformation
if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}

if len(dst) == 0 {
return errors.New("WMI query returned empty result set")
}

for _, info := range dst {
for sensorName, data := range perfData {
// Divide by 10 and subtract 273.15 to convert decikelvin to celsius
ch <- prometheus.MustNewConstMetric(
c.temperature,
prometheus.GaugeValue,
(float64(info.HighPrecisionTemperature)/10.0)-273.15,
info.Name,
(data[highPrecisionTemperature].FirstValue/10.0)-273.15,
sensorName,
)

ch <- prometheus.MustNewConstMetric(
c.percentPassiveLimit,
prometheus.GaugeValue,
float64(info.PercentPassiveLimit),
info.Name,
data[percentPassiveLimit].FirstValue,
sensorName,
)

ch <- prometheus.MustNewConstMetric(
c.throttleReasons,
prometheus.GaugeValue,
float64(info.ThrottleReasons),
info.Name,
data[throttleReasons].FirstValue,
sensorName,
)
}
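
Note: a worked example of the decikelvin conversion used in the loop above; 3131 is an assumed sample reading, not a value from the diff:

package main

import "fmt"

func main() {
	raw := 3131.0 // HighPrecisionTemperature sample value, in decikelvin (tenths of a kelvin)

	celsius := (raw / 10.0) - 273.15 // 313.1 K -> 39.95 °C

	fmt.Printf("%.2f °C\n", celsius) // prints: 39.95 °C
}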

@@ -43,6 +43,7 @@ type Config struct {
CollectorsEnabled []string `yaml:"collectors_enabled"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
CollectorsEnabled: []string{
collectorSystemTime,

@@ -242,7 +243,7 @@ func (c *Collector) collectNTP(ch chan<- prometheus.Metric) error {
data, ok := perfData[perfdata.InstanceEmpty]
if !ok {
return errors.New("query for Windows Time Service returned empty result set")
return fmt.Errorf("failed to collect VM Memory metrics: %w", err)
}

ch <- prometheus.MustNewConstMetric(

@@ -31,6 +31,7 @@ const Name = "udp"
type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_Tcpip_TCPv{4,6} metrics.

@@ -157,7 +158,7 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
return nil
}

func (c *Collector) writeUDPCounters(ch chan<- prometheus.Metric, metrics map[string]perfdata.CounterValues, labels []string) {
func (c *Collector) writeUDPCounters(ch chan<- prometheus.Metric, metrics map[string]perfdata.CounterValue, labels []string) {
ch <- prometheus.MustNewConstMetric(
c.datagramsNoPortTotal,
prometheus.CounterValue,

@@ -41,6 +41,7 @@ type Config struct {
scrapeInterval time.Duration `yaml:"scrape_interval"`
}

//nolint:gochecknoglobals
var ConfigDefaults = Config{
online: false,
scrapeInterval: 6 * time.Hour,

@@ -32,6 +32,7 @@ const Name = "vmware"
type Config struct{}

//nolint:gochecknoglobals
var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics.

@@ -266,7 +267,7 @@ func (c *Collector) collectMem(ch chan<- prometheus.Metric) error {
data, ok := perfData[perfdata.InstanceEmpty]
if !ok {
return errors.New("query for VM Memory returned empty result set")
return fmt.Errorf("failed to collect VM Memory metrics: %w", types.ErrNoData)
}

ch <- prometheus.MustNewConstMetric(

@@ -347,12 +348,12 @@ func (c *Collector) collectMem(ch chan<- prometheus.Metric) error {
func (c *Collector) collectCpu(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorCPU.Collect()
if err != nil {
return fmt.Errorf("failed to collect VM Memory metrics: %w", err)
return fmt.Errorf("failed to collect VM CPU metrics: %w", err)
}

data, ok := perfData[perfdata.InstanceTotal]
if !ok {
return errors.New("query for VM CPU returned empty result set")
return fmt.Errorf("failed to collect VM CPU metrics: %w", types.ErrNoData)
}

ch <- prometheus.MustNewConstMetric(

@@ -23,6 +23,7 @@ import (
"golang.org/x/sys/windows"
)

//nolint:gochecknoglobals
var (
modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
procGetExtendedTcpTable = modiphlpapi.NewProc("GetExtendedTcpTable")

@@ -21,6 +21,7 @@ import (
"golang.org/x/sys/windows"
)

//nolint:gochecknoglobals
var (
kernel32 = windows.NewLazySystemDLL("kernel32.dll")

@@ -45,6 +45,7 @@ type WorkstationInfo struct {
LoggedOnUsers uint32
}

//nolint:gochecknoglobals
var (
netapi32 = windows.NewLazySystemDLL("netapi32")
procNetWkstaGetInfo = netapi32.NewProc("NetWkstaGetInfo")

@@ -53,6 +54,8 @@ var (
// NetApiStatus is a map of Network Management Error Codes.
// https://docs.microsoft.com/en-gb/windows/win32/netmgmt/network-management-error-codes?redirectedfrom=MSDN
//
//nolint:gochecknoglobals
var NetApiStatus = map[uint32]string{
// Success
0: "NERR_Success",

@@ -40,6 +40,7 @@ type PerformanceInformation struct {
ThreadCount uint32
}

//nolint:gochecknoglobals
var (
psapi = windows.NewLazySystemDLL("psapi.dll")
procGetPerformanceInfo = psapi.NewProc("GetPerformanceInfo")
Some files were not shown because too many files have changed in this diff.