Compare commits

...

17 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| Jan-Otto Kröpke | 4e460bc24c | exchange: enable all collectors as default (#1572) | 2024-08-17 21:00:05 +02:00 |
| Jan-Otto Kröpke | b5ceb27836 | time: fix windows_time_computed_time_offset_seconds docs (#1571) (Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>) | 2024-08-17 20:07:25 +02:00 |
| Jan-Otto Kröpke | f6965b10f4 | mssql: fix nil panic, if YAML file is used a configuration (#1570) | 2024-08-14 08:28:46 +02:00 |
| dependabot[bot] | d7f052fb27 | chore(deps): bump golang.org/x/sys from 0.23.0 to 0.24.0 (#1566) (Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>) | 2024-08-12 14:24:45 +02:00 |
| Jan-Otto Kröpke | ca4ad46e2d | scheduled_task: do not report windows_scheduled_task_last_result for task that never run before (#1562) | 2024-08-11 15:47:59 +02:00 |
| Jan-Otto Kröpke | dd956c986b | chore: Refactor Config Collector API (#1558) | 2024-08-11 13:28:39 +02:00 |
| Jan-Otto Kröpke | 7bb16d2f5b | textfile: disable collector by default (#1560) | 2024-08-11 12:57:14 +02:00 |
| Jan-Otto Kröpke | e478843faa | chore: deprecate teradici_pcoip and vmware_blast collector (#1559) | 2024-08-11 11:32:01 +02:00 |
| Jan-Otto Kröpke | 9b02e4a0ea | chore: enable more linter (#1557) | 2024-08-10 22:05:33 +02:00 |
| Jan-Otto Kröpke | 27a3553dac | *: cleanup collector API 3 (#1556) | 2024-08-10 20:02:07 +02:00 |
| Jan-Otto Kröpke | b2548e02bd | installer: do not enable V1 counters by default (#1555) | 2024-08-10 14:39:11 +02:00 |
| Jan-Otto Kröpke | 0ada09ee3c | *: Collector API cleanup 2 (#1552) | 2024-08-05 23:40:32 +02:00 |
| Jan-Otto Kröpke | 3c360c05f3 | Update pr-check.yaml (Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>) | 2024-08-05 20:01:59 +02:00 |
| PrometheusBot | fe8e72e38f | Synchronize common files from prometheus/prometheus (#1551) | 2024-08-05 20:01:08 +02:00 |
| dependabot[bot] | 2d091c7187 | chore(deps): bump golang.org/x/sys from 0.22.0 to 0.23.0 (#1549) (Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>) | 2024-08-05 15:57:18 +02:00 |
| dependabot[bot] | b4999eda4b | chore(deps): bump actions/checkout from 3 to 4 (#1550) (Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>) | 2024-08-05 15:57:10 +02:00 |
| Jan-Otto Kröpke | d1e3a63f93 | *: cleanup collector API 1 (#1547) | 2024-08-05 15:50:41 +02:00 |
122 changed files with 7763 additions and 7245 deletions

.gitattributes (new file, +3 lines)

@@ -0,0 +1,3 @@
*.go text eol=lf
*.sh text eol=lf
Makefile text eol=lf

@@ -100,5 +100,5 @@ jobs:
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: v1.58
args: "--timeout=5m --out-format github-actions,colored-line-number"
version: v1.59
args: "--timeout=5m"

@@ -33,14 +33,16 @@ jobs:
name: check title prefix
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: check
run: |
PR_TITLE_PREFIX=$(echo "$PR_TITLE" | cut -d':' -f1)
if [[ ! -d "pkg/collector/$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]]; then
echo "PR title must start with an name of an collector package"
echo "Example: 'logical_disk: description'"
exit 1
if [[ -d "pkg/collector/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then
exit 0
fi
echo "PR title must start with an name of an collector package"
echo "Example: 'logical_disk: description'"
exit 1
env:
PR_TITLE: ${{ github.event.pull_request.title }}

@@ -77,7 +77,7 @@ jobs:
run: |
$ErrorActionPreference = "Stop"
$Version = git describe --tag
$Version = git describe --tags --always
$Version = $Version -replace 'v', ''
# '+' symbols are invalid characters in image tags
$Version = $Version -replace '\+', '_'

@@ -1,100 +1,52 @@
linters:
enable-all: true
disable:
- asasalint
- asciicheck
- bidichk
- bodyclose
- canonicalheader
- containedctx
- contextcheck
- copyloopvar
- cyclop
- decorder
- depguard
- dogsled
- dupl
- dupword
- durationcheck
- err113
- errchkjson
- errname
- errorlint
- exhaustive
- exhaustruct
- exportloopref
- fatcontext
- forbidigo
- forcetypeassert
- funlen
- gci
- ginkgolinter
- gocheckcompilerdirectives
- gochecknoglobals
- gochecknoinits
- gochecksumtype
- gocognit
- goconst
- gocritic
- gocyclo
- godot
- godox
- gofumpt
- goheader
- goimports
- gomoddirectives
- gomodguard
- goprintffuncname
- gosec
- gosimple
- gosmopolitan
- grouper
- importas
- inamedparam
- interfacebloat
- intrange
- ireturn
- lll
- maintidx
- makezero
- mirror
- misspell
- mnd
- musttag
- nakedret
- nestif
- nlreturn
- noctx
- nolintlint
- nonamedreturns
- nosprintfhostport
- paralleltest
- predeclared
- protogetter
- reassign
- rowserrcheck
- sloglint
- spancheck
- sqlclosecheck
- staticcheck
- stylecheck
- tagalign
- tagliatelle
- tenv
- testableexamples
- testifylint
- testpackage
- thelper
- tparallel
- usestdlibvars
- varnamelen
- wastedassign
- whitespace
- wrapcheck
- wsl
- zerologlint
- execinquery
- gomnd
- stylecheck
- maintidx
linters-settings:
gci:
sections:
- prefix(github.com/prometheus-community/windows_exporter/pkg/initiate)
- standard # Standard section: captures all standard packages.
- default # Default section: contains all imports that could not be matched to another section type.
custom-order: true
tagliatelle:
case:
use-field-name: true
rules:
# Any struct tag type can be used.
# Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`
json: camel
yaml: snake
issues:
exclude:
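
The gci settings above enforce a custom import layout: pkg/initiate forms its own leading section, then the standard library, then every remaining import. A minimal sketch of an import block that satisfies this ordering; apart from pkg/initiate, the imports shown are only illustrative:

```go
package main

import (
	// First gci section: pkg/initiate alone, so registration with the
	// Windows service control manager happens before anything else.
	_ "github.com/prometheus-community/windows_exporter/pkg/initiate"

	// Standard section.
	"fmt"
	"runtime"

	// Default section: all imports not matched by an earlier section.
	"github.com/prometheus/common/version"
)

func main() {
	fmt.Println(version.Print("windows_exporter"))
	fmt.Println("built with", runtime.Version())
}
```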


@@ -60,7 +60,7 @@ Name | Description | Enabled by default
[time](docs/collector.time.md) | Windows Time Service |
[thermalzone](docs/collector.thermalzone.md) | Thermal information
[terminal_services](docs/collector.terminal_services.md) | Terminal services (RDS)
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | &#10003;
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file |
[vmware_blast](docs/collector.vmware_blast.md) | VMware Blast session metrics |
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |

config.yaml (new file, +7 lines)

@@ -0,0 +1,7 @@
collectors:
  enabled: cpu,cpu_info,cs,exchange,iis,logical_disk,logon,memory,net,os,process,remote_fx,service,system,tcp,time,terminal_services,textfile
collector:
  service:
    services-where: "Name='windows_exporter'"
log:
  level: warn
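
As a quick check of the file above, the following sketch parses it with gopkg.in/yaml.v3 (already a direct dependency in go.mod). The struct is a hypothetical subset of the real configuration schema, reduced to the keys present in this file, and the file is assumed to sit in the working directory:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"gopkg.in/yaml.v3"
)

// exporterConfig mirrors only the keys used in the bundled config.yaml;
// it is not the exporter's real configuration type.
type exporterConfig struct {
	Collectors struct {
		Enabled string `yaml:"enabled"`
	} `yaml:"collectors"`
	Log struct {
		Level string `yaml:"level"`
	} `yaml:"log"`
}

func main() {
	raw, err := os.ReadFile("config.yaml")
	if err != nil {
		log.Fatal(err)
	}

	var cfg exporterConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}

	fmt.Println("enabled collectors:", cfg.Collectors.Enabled)
	fmt.Println("log level:", cfg.Log.Level)
}
```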


@@ -5,11 +5,11 @@ If the Windows Time Service is stopped after collection has started, collector m
Please note the Time Service perflib counters are only available on [Windows Server 2016 or newer](https://docs.microsoft.com/en-us/windows-server/networking/windows-time-service/windows-server-2016-improvements).
|||
-|-
Metric name prefix | `time`
Data source | Perflib
Enabled by default? | No
| | |
|---------------------|---------|
| Metric name prefix | `time` |
| Data source | Perflib |
| Enabled by default? | No |
## Flags
@@ -17,14 +17,14 @@ None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_time_clock_frequency_Adjustment_ppb_total` | Total adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | counter | None
`windows_time_computed_time_offset_seconds` | Absolute time offset between the system clock and the chosen time source, in seconds. | counter | None
`windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. | gauge | None
`windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None
`windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None
`windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None
| Name | Description | Type | Labels |
|-----------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|--------|
| `windows_time_clock_frequency_adjustment_ppb_total` | Total adjustment made to the local system clock frequency by W32Time in parts per billion (PPB) units. 1 PPB adjustment implies the system clock was adjusted at a rate of 1 nanosecond per second (1 ns/s). The smallest possible adjustment can vary and is expected to be in the order of 100's of PPB. | counter | None |
| `windows_time_computed_time_offset_seconds` | The absolute time offset between the system clock and the chosen time source, as computed by the W32Time service in microseconds. When a new valid sample is available, the computed time is updated with the time offset indicated by the sample. This time is the actual time offset of the local clock. W32Time initiates clock correction by using this offset and updates the computed time in between samples with the remaining time offset that needs to be applied to the local clock. Clock accuracy can be tracked by using this performance counter with a low polling interval (for example, 256 seconds or less) and looking for the counter value to be smaller than the desired clock accuracy limit. | gauge | None |
| `windows_time_ntp_client_time_sources` | Active number of NTP Time sources being used by the client. This is a count of active, distinct IP addresses of time servers that are responding to this client's requests. | gauge | None |
| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None |
| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None |
| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None |
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

@@ -1,7 +1,7 @@
---
# Note this is not an exhaustive list of all configuration values
collectors:
  enabled: cpu,cs,logical_disk,net,os,service,system,textfile
  enabled: cpu,cs,logical_disk,net,os,service,system
collector:
  service:
    services-where: Name='windows_exporter'

@@ -4,41 +4,42 @@
package main
//goland:noinspection GoUnsortedImport
//nolint:gofumpt
import (
// Its important that we do these first so that we can register with the Windows service control ASAP to avoid timeouts
// Its important that we do these first so that we can register with the Windows service control ASAP to avoid timeouts.
"github.com/prometheus-community/windows_exporter/pkg/initiate"
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"os/user"
"runtime"
"sort"
"strings"
winlog "github.com/prometheus-community/windows_exporter/pkg/log"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/collector"
"github.com/prometheus-community/windows_exporter/pkg/config"
winlog "github.com/prometheus-community/windows_exporter/pkg/log"
"github.com/prometheus-community/windows_exporter/pkg/log/flag"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/common/version"
"github.com/prometheus/exporter-toolkit/web"
webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
"golang.org/x/sys/windows"
)
// https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
const PROCESS_ALL_ACCESS = windows.STANDARD_RIGHTS_REQUIRED | windows.SYNCHRONIZE | windows.SPECIFIC_RIGHTS_ALL
// Same struct prometheus uses for their /version endpoint.
// Separate copy to avoid pulling all of prometheus as a dependency
// Separate copy to avoid pulling all of prometheus as a dependency.
type prometheusVersion struct {
Version string `json:"version"`
Revision string `json:"revision"`
@@ -48,7 +49,7 @@ type prometheusVersion struct {
GoVersion string `json:"goVersion"`
}
// Mapping of priority names to uin32 values required by windows.SetPriorityClass
// Mapping of priority names to uin32 values required by windows.SetPriorityClass.
var priorityStringToInt = map[string]uint32{
"realtime": windows.REALTIME_PRIORITY_CLASS,
"high": windows.HIGH_PRIORITY_CLASS,
@@ -59,18 +60,23 @@ var priorityStringToInt = map[string]uint32{
}
func setPriorityWindows(pid int, priority uint32) error {
handle, err := windows.OpenProcess(PROCESS_ALL_ACCESS, false, uint32(pid))
// https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
handle, err := windows.OpenProcess(
windows.STANDARD_RIGHTS_REQUIRED|windows.SYNCHRONIZE|windows.SPECIFIC_RIGHTS_ALL,
false, uint32(pid),
)
if err != nil {
return err
}
//nolint:errcheck
defer windows.CloseHandle(handle) // Technically this can fail, but we ignore it
err = windows.SetPriorityClass(handle, priority)
if err != nil {
if err = windows.SetPriorityClass(handle, priority); err != nil {
return err
}
if err = windows.CloseHandle(handle); err != nil {
return fmt.Errorf("failed to close handle: %w", err)
}
return nil
}
@@ -171,9 +177,9 @@ func main() {
collectorNames := collector.Available()
sort.Strings(collectorNames)
fmt.Printf("Available collectors:\n")
fmt.Printf("Available collectors:\n") //nolint:forbidigo
for _, n := range collectorNames {
fmt.Printf(" - %s\n", n)
fmt.Printf(" - %s\n", n) //nolint:forbidigo
}
return
@@ -259,20 +265,37 @@ func main() {
_ = level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext())
_ = level.Debug(logger).Log("msg", "Go MAXPROCS", "procs", runtime.GOMAXPROCS(0))
server := &http.Server{
ReadHeaderTimeout: 5 * time.Second,
IdleTimeout: 60 * time.Second,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Minute,
Handler: mux,
}
go func() {
server := &http.Server{Handler: mux}
if err := web.ListenAndServe(server, webConfig, logger); err != nil {
_ = level.Error(logger).Log("msg", "cannot start windows_exporter", "err", err)
os.Exit(1)
}
}()
for {
if <-initiate.StopCh {
_ = level.Info(logger).Log("msg", "Shutting down windows_exporter")
break
}
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
defer stop()
select {
case <-ctx.Done():
_ = level.Info(logger).Log("msg", "Shutting down windows_exporter via kill signal")
case <-initiate.StopCh:
_ = level.Info(logger).Log("msg", "Shutting down windows_exporter via service control")
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_ = server.Shutdown(ctx)
_ = level.Info(logger).Log("msg", "windows_exporter has shut down")
}
func withConcurrencyLimit(n int, next http.HandlerFunc) http.HandlerFunc {
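
The main() hunk above replaces the bare http.Server and the busy-wait loop on initiate.StopCh with explicit server timeouts, signal.NotifyContext, and a bounded Shutdown. A standalone sketch of the same pattern, with a plain channel standing in for initiate.StopCh:

```go
package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"os"
	"os/signal"
	"time"
)

func main() {
	stopCh := make(chan bool) // stand-in for initiate.StopCh

	server := &http.Server{
		Addr:              ":9182",
		ReadHeaderTimeout: 5 * time.Second,
		IdleTimeout:       60 * time.Second,
		ReadTimeout:       5 * time.Second,
		WriteTimeout:      10 * time.Minute,
		Handler:           http.DefaultServeMux,
	}

	go func() {
		if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Fatalf("cannot start server: %v", err)
		}
	}()

	// Block until either an OS signal or a service-control stop arrives.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	select {
	case <-ctx.Done():
		log.Println("shutting down via kill signal")
	case <-stopCh:
		log.Println("shutting down via service control")
	}

	// Give in-flight scrapes a bounded window to finish.
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = server.Shutdown(shutdownCtx)
}
```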

go.mod (5 changed lines)

@@ -17,10 +17,12 @@ require (
github.com/yusufpapurcu/wmi v1.2.4
go.opencensus.io v0.24.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
golang.org/x/sys v0.22.0
golang.org/x/sys v0.24.0
gopkg.in/yaml.v3 v3.0.1
)
require github.com/pkg/errors v0.9.1
require (
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
@@ -36,7 +38,6 @@ require (
github.com/jpillora/backoff v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect

go.sum (4 changed lines)

@@ -141,8 +141,8 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=

@@ -19,7 +19,7 @@
<Property Id="ADD_FIREWALL_EXCEPTION" Secure="yes" />
<Property Id="ENABLE_V1_PERFORMANCE_COUNTERS" Secure="yes" Value="yes"/>
<Property Id="ENABLE_V1_PERFORMANCE_COUNTERS" Secure="yes" />
<Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
<SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--web.listen-address [LISTEN_ADDR]:[LISTEN_PORT]" Condition="LISTEN_ADDR&lt;&gt;&quot;&quot; OR LISTEN_PORT&lt;&gt;9182" />

File diff suppressed because it is too large.

@@ -21,120 +21,133 @@ type Config struct{}
var ConfigDefaults = Config{}
type collector struct {
type Collector struct {
config Config
logger log.Logger
RequestsPerSecond *prometheus.Desc
RequestProcessingTime *prometheus.Desc
RetrievalsPerSecond *prometheus.Desc
RetrievalProcessingTime *prometheus.Desc
FailedRequestsPerSecond *prometheus.Desc
IssuedRequestsPerSecond *prometheus.Desc
PendingRequestsPerSecond *prometheus.Desc
RequestCryptographicSigningTime *prometheus.Desc
RequestPolicyModuleProcessingTime *prometheus.Desc
ChallengeResponsesPerSecond *prometheus.Desc
ChallengeResponseProcessingTime *prometheus.Desc
SignedCertificateTimestampListsPerSecond *prometheus.Desc
SignedCertificateTimestampListProcessingTime *prometheus.Desc
challengeResponseProcessingTime *prometheus.Desc
challengeResponsesPerSecond *prometheus.Desc
failedRequestsPerSecond *prometheus.Desc
issuedRequestsPerSecond *prometheus.Desc
pendingRequestsPerSecond *prometheus.Desc
requestCryptographicSigningTime *prometheus.Desc
requestPolicyModuleProcessingTime *prometheus.Desc
requestProcessingTime *prometheus.Desc
requestsPerSecond *prometheus.Desc
retrievalProcessingTime *prometheus.Desc
retrievalsPerSecond *prometheus.Desc
signedCertificateTimestampListProcessingTime *prometheus.Desc
signedCertificateTimestampListsPerSecond *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Certification Authority"}, nil
}
func (c *collector) Build() error {
c.RequestsPerSecond = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.requestsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
"Total certificate requests processed",
[]string{"cert_template"},
nil,
)
c.RequestProcessingTime = prometheus.NewDesc(
c.requestProcessingTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "request_processing_time_seconds"),
"Last time elapsed for certificate requests",
[]string{"cert_template"},
nil,
)
c.RetrievalsPerSecond = prometheus.NewDesc(
c.retrievalsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "retrievals_total"),
"Total certificate retrieval requests processed",
[]string{"cert_template"},
nil,
)
c.RetrievalProcessingTime = prometheus.NewDesc(
c.retrievalProcessingTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "retrievals_processing_time_seconds"),
"Last time elapsed for certificate retrieval request",
[]string{"cert_template"},
nil,
)
c.FailedRequestsPerSecond = prometheus.NewDesc(
c.failedRequestsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failed_requests_total"),
"Total failed certificate requests processed",
[]string{"cert_template"},
nil,
)
c.IssuedRequestsPerSecond = prometheus.NewDesc(
c.issuedRequestsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "issued_requests_total"),
"Total issued certificate requests processed",
[]string{"cert_template"},
nil,
)
c.PendingRequestsPerSecond = prometheus.NewDesc(
c.pendingRequestsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pending_requests_total"),
"Total pending certificate requests processed",
[]string{"cert_template"},
nil,
)
c.RequestCryptographicSigningTime = prometheus.NewDesc(
c.requestCryptographicSigningTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "request_cryptographic_signing_time_seconds"),
"Last time elapsed for signing operation request",
[]string{"cert_template"},
nil,
)
c.RequestPolicyModuleProcessingTime = prometheus.NewDesc(
c.requestPolicyModuleProcessingTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "request_policy_module_processing_time_seconds"),
"Last time elapsed for policy module processing request",
[]string{"cert_template"},
nil,
)
c.ChallengeResponsesPerSecond = prometheus.NewDesc(
c.challengeResponsesPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "challenge_responses_total"),
"Total certificate challenge responses processed",
[]string{"cert_template"},
nil,
)
c.ChallengeResponseProcessingTime = prometheus.NewDesc(
c.challengeResponseProcessingTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "challenge_response_processing_time_seconds"),
"Last time elapsed for challenge response",
[]string{"cert_template"},
nil,
)
c.SignedCertificateTimestampListsPerSecond = prometheus.NewDesc(
c.signedCertificateTimestampListsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "signed_certificate_timestamp_lists_total"),
"Total Signed Certificate Timestamp Lists processed",
[]string{"cert_template"},
nil,
)
c.SignedCertificateTimestampListProcessingTime = prometheus.NewDesc(
c.signedCertificateTimestampListProcessingTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "signed_certificate_timestamp_list_processing_time_seconds"),
"Last time elapsed for Signed Certificate Timestamp List",
[]string{"cert_template"},
@@ -144,7 +157,7 @@ func (c *collector) Build() error {
return nil
}
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collectADCSCounters(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting ADCS metrics", "err", err)
return err
@@ -169,7 +182,7 @@ type perflibADCS struct {
SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"`
}
func (c *collector) collectADCSCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibADCS, 0)
if _, ok := ctx.PerfObjects["Certification Authority"]; !ok {
return errors.New("perflib did not contain an entry for Certification Authority")
@@ -188,79 +201,79 @@ func (c *collector) collectADCSCounters(ctx *types.ScrapeContext, ch chan<- prom
continue
}
ch <- prometheus.MustNewConstMetric(
c.RequestsPerSecond,
c.requestsPerSecond,
prometheus.CounterValue,
d.RequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RequestProcessingTime,
c.requestProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.RequestProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RetrievalsPerSecond,
c.retrievalsPerSecond,
prometheus.CounterValue,
d.RetrievalsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RetrievalProcessingTime,
c.retrievalProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.RetrievalProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailedRequestsPerSecond,
c.failedRequestsPerSecond,
prometheus.CounterValue,
d.FailedRequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.IssuedRequestsPerSecond,
c.issuedRequestsPerSecond,
prometheus.CounterValue,
d.IssuedRequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PendingRequestsPerSecond,
c.pendingRequestsPerSecond,
prometheus.CounterValue,
d.PendingRequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RequestCryptographicSigningTime,
c.requestCryptographicSigningTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.RequestCryptographicSigningTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RequestPolicyModuleProcessingTime,
c.requestPolicyModuleProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.RequestPolicyModuleProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ChallengeResponsesPerSecond,
c.challengeResponsesPerSecond,
prometheus.CounterValue,
d.ChallengeResponsesPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ChallengeResponseProcessingTime,
c.challengeResponseProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.ChallengeResponseProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.SignedCertificateTimestampListsPerSecond,
c.signedCertificateTimestampListsPerSecond,
prometheus.CounterValue,
d.SignedCertificateTimestampListsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.SignedCertificateTimestampListProcessingTime,
c.signedCertificateTimestampListProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.SignedCertificateTimestampListProcessingTime),
d.Name,
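
Since the adcs diff above exports the concrete Collector type, and New now takes a typed *Config that falls back to ConfigDefaults when nil, a library consumer can construct the collector directly. A hedged usage sketch (error handling abbreviated; Collect is omitted because it needs a perflib scrape context):

```go
package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus-community/windows_exporter/pkg/collector/adcs"
)

func main() {
	logger := log.NewNopLogger()

	// Passing nil uses adcs.ConfigDefaults internally.
	c := adcs.New(logger, nil)

	// Build registers the metric descriptors before the first scrape.
	if err := c.Build(); err != nil {
		panic(err)
	}

	// Close was added as part of this refactor for collector cleanup.
	_ = c.Close()
}
```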


@@ -18,20 +18,22 @@ type Config struct{}
var ConfigDefaults = Config{}
type collector struct {
type Collector struct {
config Config
logger log.Logger
adLoginConnectionFailures *prometheus.Desc
artifactDBFailures *prometheus.Desc
avgArtifactDBQueryTime *prometheus.Desc
avgConfigDBQueryTime *prometheus.Desc
certificateAuthentications *prometheus.Desc
configDBFailures *prometheus.Desc
deviceAuthentications *prometheus.Desc
externalAuthenticationFailures *prometheus.Desc
externalAuthentications *prometheus.Desc
extranetAccountLockouts *prometheus.Desc
federatedAuthentications *prometheus.Desc
passportAuthentications *prometheus.Desc
passiveRequests *prometheus.Desc
passwordChangeFailed *prometheus.Desc
passwordChangeSucceeded *prometheus.Desc
tokenRequests *prometheus.Desc
windowsIntegratedAuthentications *prometheus.Desc
federationMetadataRequests *prometheus.Desc
oAuthAuthZRequests *prometheus.Desc
oAuthClientAuthentications *prometheus.Desc
oAuthClientAuthenticationsFailures *prometheus.Desc
@@ -50,45 +52,56 @@ type collector struct {
oAuthPasswordGrantRequestFailures *prometheus.Desc
oAuthPasswordGrantRequests *prometheus.Desc
oAuthTokenRequests *prometheus.Desc
passiveRequests *prometheus.Desc
passportAuthentications *prometheus.Desc
passwordChangeFailed *prometheus.Desc
passwordChangeSucceeded *prometheus.Desc
samlPTokenRequests *prometheus.Desc
ssoAuthenticationFailures *prometheus.Desc
ssoAuthentications *prometheus.Desc
wsfedTokenRequests *prometheus.Desc
wstrustTokenRequests *prometheus.Desc
tokenRequests *prometheus.Desc
upAuthenticationFailures *prometheus.Desc
upAuthentications *prometheus.Desc
externalAuthenticationFailures *prometheus.Desc
externalAuthentications *prometheus.Desc
artifactDBFailures *prometheus.Desc
avgArtifactDBQueryTime *prometheus.Desc
configDBFailures *prometheus.Desc
avgConfigDBQueryTime *prometheus.Desc
federationMetadataRequests *prometheus.Desc
windowsIntegratedAuthentications *prometheus.Desc
wsfedTokenRequests *prometheus.Desc
wstrustTokenRequests *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"AD FS"}, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.adLoginConnectionFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
"Total number of connection failures to an Active Directory domain controller",
@@ -397,7 +410,7 @@ type perflibADFS struct {
FederationMetadataRequests float64 `perflib:"Federation Metadata Requests"`
}
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var adfsData []perflibADFS
err := perflib.UnmarshalObject(ctx.PerfObjects["AD FS"], &adfsData, c.logger)
if err != nil {

@@ -6,6 +6,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
@@ -17,233 +18,246 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for Perflib Cache metrics
type collector struct {
// A Collector is a Prometheus Collector for Perflib Cache metrics.
type Collector struct {
config Config
logger log.Logger
AsyncCopyReadsTotal *prometheus.Desc
AsyncDataMapsTotal *prometheus.Desc
AsyncFastReadsTotal *prometheus.Desc
AsyncMDLReadsTotal *prometheus.Desc
AsyncPinReadsTotal *prometheus.Desc
CopyReadHitsTotal *prometheus.Desc
CopyReadsTotal *prometheus.Desc
DataFlushesTotal *prometheus.Desc
DataFlushPagesTotal *prometheus.Desc
DataMapHitsPercent *prometheus.Desc
DataMapPinsTotal *prometheus.Desc
DataMapsTotal *prometheus.Desc
DirtyPages *prometheus.Desc
DirtyPageThreshold *prometheus.Desc
FastReadNotPossiblesTotal *prometheus.Desc
FastReadResourceMissesTotal *prometheus.Desc
FastReadsTotal *prometheus.Desc
LazyWriteFlushesTotal *prometheus.Desc
LazyWritePagesTotal *prometheus.Desc
MDLReadHitsTotal *prometheus.Desc
MDLReadsTotal *prometheus.Desc
PinReadHitsTotal *prometheus.Desc
PinReadsTotal *prometheus.Desc
ReadAheadsTotal *prometheus.Desc
SyncCopyReadsTotal *prometheus.Desc
SyncDataMapsTotal *prometheus.Desc
SyncFastReadsTotal *prometheus.Desc
SyncMDLReadsTotal *prometheus.Desc
SyncPinReadsTotal *prometheus.Desc
asyncCopyReadsTotal *prometheus.Desc
asyncDataMapsTotal *prometheus.Desc
asyncFastReadsTotal *prometheus.Desc
asyncMDLReadsTotal *prometheus.Desc
asyncPinReadsTotal *prometheus.Desc
copyReadHitsTotal *prometheus.Desc
copyReadsTotal *prometheus.Desc
dataFlushesTotal *prometheus.Desc
dataFlushPagesTotal *prometheus.Desc
dataMapHitsPercent *prometheus.Desc
dataMapPinsTotal *prometheus.Desc
dataMapsTotal *prometheus.Desc
dirtyPages *prometheus.Desc
dirtyPageThreshold *prometheus.Desc
fastReadNotPossiblesTotal *prometheus.Desc
fastReadResourceMissesTotal *prometheus.Desc
fastReadsTotal *prometheus.Desc
lazyWriteFlushesTotal *prometheus.Desc
lazyWritePagesTotal *prometheus.Desc
mdlReadHitsTotal *prometheus.Desc
mdlReadsTotal *prometheus.Desc
pinReadHitsTotal *prometheus.Desc
pinReadsTotal *prometheus.Desc
readAheadsTotal *prometheus.Desc
syncCopyReadsTotal *prometheus.Desc
syncDataMapsTotal *prometheus.Desc
syncFastReadsTotal *prometheus.Desc
syncMDLReadsTotal *prometheus.Desc
syncPinReadsTotal *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Cache"}, nil
}
func (c *collector) Build() error {
c.AsyncCopyReadsTotal = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.asyncCopyReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
"(AsyncCopyReadsTotal)",
nil,
nil,
)
c.AsyncDataMapsTotal = prometheus.NewDesc(
c.asyncDataMapsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_data_maps_total"),
"(AsyncDataMapsTotal)",
nil,
nil,
)
c.AsyncFastReadsTotal = prometheus.NewDesc(
c.asyncFastReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_fast_reads_total"),
"(AsyncFastReadsTotal)",
nil,
nil,
)
c.AsyncMDLReadsTotal = prometheus.NewDesc(
c.asyncMDLReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_mdl_reads_total"),
"(AsyncMDLReadsTotal)",
nil,
nil,
)
c.AsyncPinReadsTotal = prometheus.NewDesc(
c.asyncPinReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_pin_reads_total"),
"(AsyncPinReadsTotal)",
nil,
nil,
)
c.CopyReadHitsTotal = prometheus.NewDesc(
c.copyReadHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "copy_read_hits_total"),
"(CopyReadHitsTotal)",
nil,
nil,
)
c.CopyReadsTotal = prometheus.NewDesc(
c.copyReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "copy_reads_total"),
"(CopyReadsTotal)",
nil,
nil,
)
c.DataFlushesTotal = prometheus.NewDesc(
c.dataFlushesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "data_flushes_total"),
"(DataFlushesTotal)",
nil,
nil,
)
c.DataFlushPagesTotal = prometheus.NewDesc(
c.dataFlushPagesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "data_flush_pages_total"),
"(DataFlushPagesTotal)",
nil,
nil,
)
c.DataMapHitsPercent = prometheus.NewDesc(
c.dataMapHitsPercent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "data_map_hits_percent"),
"(DataMapHitsPercent)",
nil,
nil,
)
c.DataMapPinsTotal = prometheus.NewDesc(
c.dataMapPinsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "data_map_pins_total"),
"(DataMapPinsTotal)",
nil,
nil,
)
c.DataMapsTotal = prometheus.NewDesc(
c.dataMapsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "data_maps_total"),
"(DataMapsTotal)",
nil,
nil,
)
c.DirtyPages = prometheus.NewDesc(
c.dirtyPages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dirty_pages"),
"(DirtyPages)",
nil,
nil,
)
c.DirtyPageThreshold = prometheus.NewDesc(
c.dirtyPageThreshold = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dirty_page_threshold"),
"(DirtyPageThreshold)",
nil,
nil,
)
c.FastReadNotPossiblesTotal = prometheus.NewDesc(
c.fastReadNotPossiblesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "fast_read_not_possibles_total"),
"(FastReadNotPossiblesTotal)",
nil,
nil,
)
c.FastReadResourceMissesTotal = prometheus.NewDesc(
c.fastReadResourceMissesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "fast_read_resource_misses_total"),
"(FastReadResourceMissesTotal)",
nil,
nil,
)
c.FastReadsTotal = prometheus.NewDesc(
c.fastReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "fast_reads_total"),
"(FastReadsTotal)",
nil,
nil,
)
c.LazyWriteFlushesTotal = prometheus.NewDesc(
c.lazyWriteFlushesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "lazy_write_flushes_total"),
"(LazyWriteFlushesTotal)",
nil,
nil,
)
c.LazyWritePagesTotal = prometheus.NewDesc(
c.lazyWritePagesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "lazy_write_pages_total"),
"(LazyWritePagesTotal)",
nil,
nil,
)
c.MDLReadHitsTotal = prometheus.NewDesc(
c.mdlReadHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mdl_read_hits_total"),
"(MDLReadHitsTotal)",
nil,
nil,
)
c.MDLReadsTotal = prometheus.NewDesc(
c.mdlReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mdl_reads_total"),
"(MDLReadsTotal)",
nil,
nil,
)
c.PinReadHitsTotal = prometheus.NewDesc(
c.pinReadHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pin_read_hits_total"),
"(PinReadHitsTotal)",
nil,
nil,
)
c.PinReadsTotal = prometheus.NewDesc(
c.pinReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pin_reads_total"),
"(PinReadsTotal)",
nil,
nil,
)
c.ReadAheadsTotal = prometheus.NewDesc(
c.readAheadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_aheads_total"),
"(ReadAheadsTotal)",
nil,
nil,
)
c.SyncCopyReadsTotal = prometheus.NewDesc(
c.syncCopyReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sync_copy_reads_total"),
"(SyncCopyReadsTotal)",
nil,
nil,
)
c.SyncDataMapsTotal = prometheus.NewDesc(
c.syncDataMapsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sync_data_maps_total"),
"(SyncDataMapsTotal)",
nil,
nil,
)
c.SyncFastReadsTotal = prometheus.NewDesc(
c.syncFastReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sync_fast_reads_total"),
"(SyncFastReadsTotal)",
nil,
nil,
)
c.SyncMDLReadsTotal = prometheus.NewDesc(
c.syncMDLReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sync_mdl_reads_total"),
"(SyncMDLReadsTotal)",
nil,
nil,
)
c.SyncPinReadsTotal = prometheus.NewDesc(
c.syncPinReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sync_pin_reads_total"),
"(SyncPinReadsTotal)",
nil,
@@ -252,12 +266,14 @@ func (c *collector) Build() error {
return nil
}
// Collect implements the Collector interface
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
// Collect implements the Collector interface.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cache metrics", "err", err)
return err
}
return nil
}
@@ -269,23 +285,20 @@ type perflibCache struct {
AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"`
AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"`
AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"`
CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"`
CopyReadHitsTotal float64 `perflib:"Copy Read Hits/sec"`
CopyReadsTotal float64 `perflib:"Copy Reads/sec"`
DataFlushesTotal float64 `perflib:"Data Flushes/sec"`
DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"`
DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"`
DataMapsTotal float64 `perflib:"Data Maps/sec"`
DirtyPages float64 `perflib:"Dirty Pages"`
DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"`
FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"`
FastReadsTotal float64 `perflib:"Fast Reads/sec"`
LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"`
LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"`
MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"`
MDLReadHitsTotal float64 `perflib:"MDL Read Hits/sec"`
MDLReadsTotal float64 `perflib:"MDL Reads/sec"`
PinReadHitsTotal float64 `perflib:"Pin Read Hits %"`
PinReadHitsTotal float64 `perflib:"Pin Read Hits/sec"`
PinReadsTotal float64 `perflib:"Pin Reads/sec"`
ReadAheadsTotal float64 `perflib:"Read Aheads/sec"`
SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"`
@@ -293,187 +306,166 @@ type perflibCache struct {
SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"`
SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"`
SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"`
DirtyPages float64 `perflib:"Dirty Pages"`
DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []perflibCache // Single-instance class, array is required but will have single entry.
if err := perflib.UnmarshalObject(ctx.PerfObjects["Cache"], &dst, c.logger); err != nil {
return err
}
if len(dst) != 1 {
return errors.New("expected single instance of Cache")
}
ch <- prometheus.MustNewConstMetric(
c.AsyncCopyReadsTotal,
c.asyncCopyReadsTotal,
prometheus.CounterValue,
dst[0].AsyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AsyncDataMapsTotal,
c.asyncDataMapsTotal,
prometheus.CounterValue,
dst[0].AsyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AsyncFastReadsTotal,
c.asyncFastReadsTotal,
prometheus.CounterValue,
dst[0].AsyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AsyncMDLReadsTotal,
c.asyncMDLReadsTotal,
prometheus.CounterValue,
dst[0].AsyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AsyncPinReadsTotal,
c.asyncPinReadsTotal,
prometheus.CounterValue,
dst[0].AsyncPinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.CopyReadHitsTotal,
prometheus.GaugeValue,
c.copyReadHitsTotal,
prometheus.CounterValue,
dst[0].CopyReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.CopyReadsTotal,
c.copyReadsTotal,
prometheus.CounterValue,
dst[0].CopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DataFlushesTotal,
c.dataFlushesTotal,
prometheus.CounterValue,
dst[0].DataFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DataFlushPagesTotal,
c.dataFlushPagesTotal,
prometheus.CounterValue,
dst[0].DataFlushPagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DataMapHitsPercent,
c.dataMapPinsTotal,
prometheus.CounterValue,
dst[0].DataMapPinsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapsTotal,
prometheus.CounterValue,
dst[0].DataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadNotPossiblesTotal,
prometheus.CounterValue,
dst[0].FastReadNotPossiblesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadResourceMissesTotal,
prometheus.CounterValue,
dst[0].FastReadResourceMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadsTotal,
prometheus.CounterValue,
dst[0].FastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWriteFlushesTotal,
prometheus.CounterValue,
dst[0].LazyWriteFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWritePagesTotal,
prometheus.CounterValue,
dst[0].LazyWritePagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadHitsTotal,
prometheus.CounterValue,
dst[0].MDLReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadsTotal,
prometheus.CounterValue,
dst[0].MDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadHitsTotal,
prometheus.CounterValue,
dst[0].PinReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadsTotal,
prometheus.CounterValue,
dst[0].PinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.readAheadsTotal,
prometheus.CounterValue,
dst[0].ReadAheadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncCopyReadsTotal,
prometheus.CounterValue,
dst[0].SyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncDataMapsTotal,
prometheus.CounterValue,
dst[0].SyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncFastReadsTotal,
prometheus.CounterValue,
dst[0].SyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncMDLReadsTotal,
prometheus.CounterValue,
dst[0].SyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncPinReadsTotal,
prometheus.CounterValue,
dst[0].SyncPinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPages,
prometheus.GaugeValue,
dst[0].DirtyPages,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPageThreshold,
prometheus.GaugeValue,
dst[0].DirtyPageThreshold,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapHitsPercent,
prometheus.GaugeValue,
dst[0].DataMapHitsPercent,
)
ch <- prometheus.MustNewConstMetric(
c.DataMapPinsTotal,
prometheus.CounterValue,
dst[0].DataMapPinsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DataMapsTotal,
prometheus.CounterValue,
dst[0].DataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DirtyPages,
prometheus.GaugeValue,
dst[0].DirtyPages,
)
ch <- prometheus.MustNewConstMetric(
c.DirtyPageThreshold,
prometheus.GaugeValue,
dst[0].DirtyPageThreshold,
)
ch <- prometheus.MustNewConstMetric(
c.FastReadNotPossiblesTotal,
prometheus.CounterValue,
dst[0].FastReadNotPossiblesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FastReadResourceMissesTotal,
prometheus.CounterValue,
dst[0].FastReadResourceMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FastReadsTotal,
prometheus.CounterValue,
dst[0].FastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.LazyWriteFlushesTotal,
prometheus.CounterValue,
dst[0].LazyWriteFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.LazyWritePagesTotal,
prometheus.CounterValue,
dst[0].LazyWritePagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.MDLReadHitsTotal,
prometheus.CounterValue,
dst[0].MDLReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.MDLReadsTotal,
prometheus.CounterValue,
dst[0].MDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.PinReadHitsTotal,
prometheus.CounterValue,
dst[0].PinReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.PinReadsTotal,
prometheus.CounterValue,
dst[0].PinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ReadAheadsTotal,
prometheus.CounterValue,
dst[0].ReadAheadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncCopyReadsTotal,
prometheus.CounterValue,
dst[0].SyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncDataMapsTotal,
prometheus.CounterValue,
dst[0].SyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncFastReadsTotal,
prometheus.CounterValue,
dst[0].SyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncMDLReadsTotal,
prometheus.CounterValue,
dst[0].SyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.SyncPinReadsTotal,
prometheus.CounterValue,
dst[0].SyncPinReadsTotal,
)
return nil
}

pkg/collector/cache/cache_test.go (new file, +12 lines)

@@ -0,0 +1,12 @@
package cache_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/cache"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, cache.Name, cache.NewWithFlags)
}

@@ -3,12 +3,12 @@
package collector
import (
"errors"
"slices"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/collector/ad"
"github.com/prometheus-community/windows_exporter/pkg/collector/adcs"
"github.com/prometheus-community/windows_exporter/pkg/collector/adfs"
@@ -22,6 +22,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/diskdrive"
"github.com/prometheus-community/windows_exporter/pkg/collector/dns"
"github.com/prometheus-community/windows_exporter/pkg/collector/exchange"
"github.com/prometheus-community/windows_exporter/pkg/collector/fsrmquota"
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv"
"github.com/prometheus-community/windows_exporter/pkg/collector/iis"
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
@@ -68,45 +69,44 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/types"
)
type Collectors struct {
logger log.Logger
collectors map[string]types.Collector
perfCounterQuery string
}
// NewWithFlags To be called by the exporter for collector initialization before running kingpin.Parse
// NewWithFlags To be called by the exporter for collector initialization before running kingpin.Parse.
func NewWithFlags(app *kingpin.Application) Collectors {
collectors := map[string]types.Collector{}
collectors := map[string]Collector{}
for name, builder := range Map {
for name, builder := range BuildersWithFlags {
collectors[name] = builder(app)
}
return New(collectors)
}
func NewBuilderWithFlags[C Collector](fn BuilderWithFlags[C]) BuilderWithFlags[Collector] {
return func(app *kingpin.Application) Collector {
return fn(app)
}
}
// NewWithConfig To be called by the external libraries for collector initialization without running kingpin.Parse
//
//goland:noinspection GoUnusedExportedFunction
func NewWithConfig(logger log.Logger, config Config) Collectors {
collectors := map[string]types.Collector{}
collectors[ad.Name] = ad.New(logger, &config.Ad)
collectors[adcs.Name] = adcs.New(logger, &config.Adcs)
collectors[adfs.Name] = adfs.New(logger, &config.Adfs)
collectors := map[string]Collector{}
collectors[ad.Name] = ad.New(logger, &config.AD)
collectors[adcs.Name] = adcs.New(logger, &config.ADCS)
collectors[adfs.Name] = adfs.New(logger, &config.ADFS)
collectors[cache.Name] = cache.New(logger, &config.Cache)
collectors[container.Name] = container.New(logger, &config.Container)
collectors[cpu.Name] = cpu.New(logger, &config.Cpu)
collectors[cpu_info.Name] = cpu_info.New(logger, &config.CpuInfo)
collectors[cpu.Name] = cpu.New(logger, &config.CPU)
collectors[cpu_info.Name] = cpu_info.New(logger, &config.CPUInfo)
collectors[cs.Name] = cs.New(logger, &config.Cs)
collectors[dfsr.Name] = dfsr.New(logger, &config.Dfsr)
collectors[dfsr.Name] = dfsr.New(logger, &config.DFSR)
collectors[dhcp.Name] = dhcp.New(logger, &config.Dhcp)
collectors[diskdrive.Name] = diskdrive.New(logger, &config.Diskdrive)
collectors[dns.Name] = dns.New(logger, &config.Dns)
collectors[diskdrive.Name] = diskdrive.New(logger, &config.DiskDrive)
collectors[dns.Name] = dns.New(logger, &config.DNS)
collectors[exchange.Name] = exchange.New(logger, &config.Exchange)
collectors[exchange.Name] = exchange.New(logger, &config.Fsrmquota)
collectors[fsrmquota.Name] = fsrmquota.New(logger, &config.Fsrmquota)
collectors[hyperv.Name] = hyperv.New(logger, &config.Hyperv)
collectors[iis.Name] = iis.New(logger, &config.Iis)
collectors[iis.Name] = iis.New(logger, &config.IIS)
collectors[license.Name] = license.New(logger, &config.License)
collectors[logical_disk.Name] = logical_disk.New(logger, &config.LogicalDisk)
collectors[logon.Name] = logon.New(logger, &config.Logon)
@@ -135,12 +135,12 @@ func NewWithConfig(logger log.Logger, config Config) Collectors {
collectors[remote_fx.Name] = remote_fx.New(logger, &config.RemoteFx)
collectors[scheduled_task.Name] = scheduled_task.New(logger, &config.ScheduledTask)
collectors[service.Name] = service.New(logger, &config.Service)
collectors[smb.Name] = smb.New(logger, &config.Smb)
collectors[smbclient.Name] = smbclient.New(logger, &config.SmbClient)
collectors[smtp.Name] = smtp.New(logger, &config.Smtp)
collectors[smb.Name] = smb.New(logger, &config.SMB)
collectors[smbclient.Name] = smbclient.New(logger, &config.SMBClient)
collectors[smtp.Name] = smtp.New(logger, &config.SMTP)
collectors[system.Name] = system.New(logger, &config.System)
collectors[teradici_pcoip.Name] = teradici_pcoip.New(logger, &config.TeradiciPcoip)
collectors[tcp.Name] = tcp.New(logger, &config.Tcp)
collectors[tcp.Name] = tcp.New(logger, &config.TCP)
collectors[terminal_services.Name] = terminal_services.New(logger, &config.TerminalServices)
collectors[textfile.Name] = textfile.New(logger, &config.Textfile)
collectors[thermalzone.Name] = thermalzone.New(logger, &config.Thermalzone)
@@ -151,8 +151,8 @@ func NewWithConfig(logger log.Logger, config Config) Collectors {
return New(collectors)
}
// New To be called by the external libraries for collector initialization
func New(collectors map[string]types.Collector) Collectors {
// New To be called by the external libraries for collector initialization.
func New(collectors Map) Collectors {
return Collectors{
collectors: collectors,
}
@@ -195,7 +195,7 @@ func (c *Collectors) SetPerfCounterQuery() error {
return nil
}
// Enable removes all collectors that not enabledCollectors
// Enable removes all collectors that not enabledCollectors.
func (c *Collectors) Enable(enabledCollectors []string) {
for name := range c.collectors {
if !slices.Contains(enabledCollectors, name) {
@@ -204,9 +204,10 @@ func (c *Collectors) Enable(enabledCollectors []string) {
}
}
// Build To be called by the exporter for collector initialization
// Build To be called by the exporter for collector initialization.
func (c *Collectors) Build() error {
var err error
for _, collector := range c.collectors {
if err = collector.Build(); err != nil {
return err
@@ -216,7 +217,7 @@ func (c *Collectors) Build() error {
return nil
}
// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape
// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape.
func (c *Collectors) PrepareScrapeContext() (*types.ScrapeContext, error) {
objs, err := perflib.GetPerflibSnapshot(c.perfCounterQuery)
if err != nil {
@@ -225,3 +226,16 @@ func (c *Collectors) PrepareScrapeContext() (*types.ScrapeContext, error) {
return &types.ScrapeContext{PerfObjects: objs}, nil
}
// Close To be called by the exporter for collector cleanup.
func (c *Collectors) Close() error {
errs := make([]error, 0, len(c.collectors))
for _, collector := range c.collectors {
if err := collector.Build(); err != nil {
errs = append(errs, err)
}
}
return errors.Join(errs...)
}
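
The new Close method on Collectors gathers per-collector failures and returns them as one error via errors.Join (Go 1.20+). A minimal, self-contained sketch of that aggregation pattern, using an illustrative closer interface rather than the project's own types:

```go
package collectorsketch

import (
	"errors"
	"fmt"
)

// closer is an illustrative stand-in for a collector that needs cleanup.
type closer interface {
	Close() error
}

// closeAll closes every collector and joins all failures into one error.
func closeAll(collectors map[string]closer) error {
	errs := make([]error, 0, len(collectors))

	for name, c := range collectors {
		if err := c.Close(); err != nil {
			errs = append(errs, fmt.Errorf("close %s: %w", name, err))
		}
	}

	return errors.Join(errs...)
}
```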

@@ -14,6 +14,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/diskdrive"
"github.com/prometheus-community/windows_exporter/pkg/collector/dns"
"github.com/prometheus-community/windows_exporter/pkg/collector/exchange"
"github.com/prometheus-community/windows_exporter/pkg/collector/fsrmquota"
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv"
"github.com/prometheus-community/windows_exporter/pkg/collector/iis"
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
@@ -59,22 +60,22 @@ import (
)
type Config struct {
Ad ad.Config `yaml:"ad"`
Adcs adcs.Config `yaml:"adcs"`
Adfs adfs.Config `yaml:"adfs"`
AD ad.Config `yaml:"ad"`
ADCS adcs.Config `yaml:"adcs"`
ADFS adfs.Config `yaml:"adfs"`
Cache cache.Config `yaml:"cache"`
Container container.Config `yaml:"container"`
Cpu cpu.Config `yaml:"cpu"`
CpuInfo cpu_info.Config `yaml:"cpu_info"`
CPU cpu.Config `yaml:"cpu"`
CPUInfo cpu_info.Config `yaml:"cpu_info"`
Cs cs.Config `yaml:"cs"`
Dfsr dfsr.Config `yaml:"dfsr"`
DFSR dfsr.Config `yaml:"dfsr"`
Dhcp dhcp.Config `yaml:"dhcp"`
Diskdrive diskdrive.Config `yaml:"diskdrive"`
Dns dns.Config `yaml:"dns"`
DiskDrive diskdrive.Config `yaml:"diskdrive"` //nolint:tagliatelle
DNS dns.Config `yaml:"dns"`
Exchange exchange.Config `yaml:"exchange"`
Fsrmquota exchange.Config `yaml:"fsrmquota"`
Fsrmquota fsrmquota.Config `yaml:"fsrmquota"`
Hyperv hyperv.Config `yaml:"hyperv"`
Iis iis.Config `yaml:"iis"`
IIS iis.Config `yaml:"iis"`
License license.Config `yaml:"license"`
LogicalDisk logical_disk.Config `yaml:"logical_disk"`
Logon logon.Config `yaml:"logon"`
@@ -83,7 +84,7 @@ type Config struct {
MsclusterNetwork mscluster_network.Config `yaml:"mscluster_network"`
MsclusterNode mscluster_node.Config `yaml:"mscluster_node"`
MsclusterResource mscluster_resource.Config `yaml:"mscluster_resource"`
MsclusterResourceGroup mscluster_resourcegroup.Config `yaml:"mscluster_resourcegroup"`
MsclusterResourceGroup mscluster_resourcegroup.Config `yaml:"mscluster_resourcegroup"` //nolint:tagliatelle
Msmq msmq.Config `yaml:"msmq"`
Mssql mssql.Config `yaml:"mssql"`
Net net.Config `yaml:"net"`
@@ -103,12 +104,12 @@ type Config struct {
RemoteFx remote_fx.Config `yaml:"remote_fx"`
ScheduledTask scheduled_task.Config `yaml:"scheduled_task"`
Service service.Config `yaml:"service"`
Smb smb.Config `yaml:"smb"`
SmbClient smbclient.Config `yaml:"smbclient"`
Smtp smtp.Config `yaml:"smtp"`
SMB smb.Config `yaml:"smb"`
SMBClient smbclient.Config `yaml:"smbclient"` //nolint:tagliatelle
SMTP smtp.Config `yaml:"smtp"`
System system.Config `yaml:"system"`
TeradiciPcoip teradici_pcoip.Config `yaml:"teradici_pcoip"`
Tcp tcp.Config `yaml:"tcp"`
TCP tcp.Config `yaml:"tcp"`
TerminalServices terminal_services.Config `yaml:"terminal_services"`
Textfile textfile.Config `yaml:"textfile"`
Thermalzone thermalzone.Config `yaml:"thermalzone"`
@@ -121,22 +122,22 @@ type Config struct {
//
//goland:noinspection GoUnusedGlobalVariable
var ConfigDefaults = Config{
Ad: ad.ConfigDefaults,
Adcs: adcs.ConfigDefaults,
Adfs: adfs.ConfigDefaults,
AD: ad.ConfigDefaults,
ADCS: adcs.ConfigDefaults,
ADFS: adfs.ConfigDefaults,
Cache: cache.ConfigDefaults,
Container: container.ConfigDefaults,
Cpu: cpu.ConfigDefaults,
CpuInfo: cpu_info.ConfigDefaults,
CPU: cpu.ConfigDefaults,
CPUInfo: cpu_info.ConfigDefaults,
Cs: cs.ConfigDefaults,
Dfsr: dfsr.ConfigDefaults,
DFSR: dfsr.ConfigDefaults,
Dhcp: dhcp.ConfigDefaults,
Diskdrive: diskdrive.ConfigDefaults,
Dns: dns.ConfigDefaults,
DiskDrive: diskdrive.ConfigDefaults,
DNS: dns.ConfigDefaults,
Exchange: exchange.ConfigDefaults,
Fsrmquota: exchange.ConfigDefaults,
Fsrmquota: fsrmquota.ConfigDefaults,
Hyperv: hyperv.ConfigDefaults,
Iis: iis.ConfigDefaults,
IIS: iis.ConfigDefaults,
License: license.ConfigDefaults,
LogicalDisk: logical_disk.ConfigDefaults,
Logon: logon.ConfigDefaults,
@@ -165,12 +166,12 @@ var ConfigDefaults = Config{
RemoteFx: remote_fx.ConfigDefaults,
ScheduledTask: scheduled_task.ConfigDefaults,
Service: service.ConfigDefaults,
Smb: smb.ConfigDefaults,
SmbClient: smbclient.ConfigDefaults,
Smtp: smtp.ConfigDefaults,
SMB: smb.ConfigDefaults,
SMBClient: smbclient.ConfigDefaults,
SMTP: smtp.ConfigDefaults,
System: system.ConfigDefaults,
TeradiciPcoip: teradici_pcoip.ConfigDefaults,
Tcp: tcp.ConfigDefaults,
TCP: tcp.ConfigDefaults,
TerminalServices: terminal_services.ConfigDefaults,
Textfile: textfile.ConfigDefaults,
Thermalzone: thermalzone.ConfigDefaults,
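Worth noting: only the Go field names change here; the yaml struct tags keep the existing lowercase keys, so existing configuration files keep working. A rough sketch of that, under the assumptions that the parent package is pkg/collector and that gopkg.in/yaml.v3 is the YAML library in use:

```go
package main

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/pkg/collector"
	"gopkg.in/yaml.v3"
)

func main() {
	// The same keys that worked before the rename still map onto the renamed fields.
	raw := []byte("smbclient: {}\nmscluster_resourcegroup: {}\n")

	cfg := collector.ConfigDefaults
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	fmt.Printf("%+v %+v\n", cfg.SMBClient, cfg.MsclusterResourceGroup)
}
```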


@@ -20,167 +20,181 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for containers metrics
type collector struct {
// A Collector is a Prometheus Collector for container metrics.
type Collector struct {
config Config
logger log.Logger
// Presence
ContainerAvailable *prometheus.Desc
containerAvailable *prometheus.Desc
// Number of containers
ContainersCount *prometheus.Desc
// memory
UsageCommitBytes *prometheus.Desc
UsageCommitPeakBytes *prometheus.Desc
UsagePrivateWorkingSetBytes *prometheus.Desc
containersCount *prometheus.Desc
// Memory
usageCommitBytes *prometheus.Desc
usageCommitPeakBytes *prometheus.Desc
usagePrivateWorkingSetBytes *prometheus.Desc
// CPU
RuntimeTotal *prometheus.Desc
RuntimeUser *prometheus.Desc
RuntimeKernel *prometheus.Desc
runtimeTotal *prometheus.Desc
runtimeUser *prometheus.Desc
runtimeKernel *prometheus.Desc
// Network
BytesReceived *prometheus.Desc
BytesSent *prometheus.Desc
PacketsReceived *prometheus.Desc
PacketsSent *prometheus.Desc
DroppedPacketsIncoming *prometheus.Desc
DroppedPacketsOutgoing *prometheus.Desc
bytesReceived *prometheus.Desc
bytesSent *prometheus.Desc
packetsReceived *prometheus.Desc
packetsSent *prometheus.Desc
droppedPacketsIncoming *prometheus.Desc
droppedPacketsOutgoing *prometheus.Desc
// Storage
ReadCountNormalized *prometheus.Desc
ReadSizeBytes *prometheus.Desc
WriteCountNormalized *prometheus.Desc
WriteSizeBytes *prometheus.Desc
readCountNormalized *prometheus.Desc
readSizeBytes *prometheus.Desc
writeCountNormalized *prometheus.Desc
writeSizeBytes *prometheus.Desc
}
// New constructs a new collector
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
// New constructs a new Collector.
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
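Each collector now follows the same constructor contract: passing a nil config falls back to the package's ConfigDefaults. A tiny sketch of what that means for callers (the nop logger is just a stand-in):

```go
package main

import (
	kitlog "github.com/go-kit/log"

	"github.com/prometheus-community/windows_exporter/pkg/collector/container"
)

func main() {
	logger := kitlog.NewNopLogger()

	// A nil config falls back to container.ConfigDefaults ...
	a := container.New(logger, nil)
	// ... which is equivalent to passing the defaults explicitly.
	b := container.New(logger, &container.ConfigDefaults)

	_, _ = a, b
}
```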
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.ContainerAvailable = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.containerAvailable = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "available"),
"Available",
[]string{"container_id"},
nil,
)
c.ContainersCount = prometheus.NewDesc(
c.containersCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "count"),
"Number of containers",
nil,
nil,
)
c.UsageCommitBytes = prometheus.NewDesc(
c.usageCommitBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memory_usage_commit_bytes"),
"Memory Usage Commit Bytes",
[]string{"container_id"},
nil,
)
c.UsageCommitPeakBytes = prometheus.NewDesc(
c.usageCommitPeakBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memory_usage_commit_peak_bytes"),
"Memory Usage Commit Peak Bytes",
[]string{"container_id"},
nil,
)
c.UsagePrivateWorkingSetBytes = prometheus.NewDesc(
c.usagePrivateWorkingSetBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memory_usage_private_working_set_bytes"),
"Memory Usage Private Working Set Bytes",
[]string{"container_id"},
nil,
)
c.RuntimeTotal = prometheus.NewDesc(
c.runtimeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_usage_seconds_total"),
"Total Run time in Seconds",
[]string{"container_id"},
nil,
)
c.RuntimeUser = prometheus.NewDesc(
c.runtimeUser = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_usage_seconds_usermode"),
"Run Time in User mode in Seconds",
[]string{"container_id"},
nil,
)
c.RuntimeKernel = prometheus.NewDesc(
c.runtimeKernel = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_usage_seconds_kernelmode"),
"Run time in Kernel mode in Seconds",
[]string{"container_id"},
nil,
)
c.BytesReceived = prometheus.NewDesc(
c.bytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "network_receive_bytes_total"),
"Bytes Received on Interface",
[]string{"container_id", "interface"},
nil,
)
c.BytesSent = prometheus.NewDesc(
c.bytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "network_transmit_bytes_total"),
"Bytes Sent on Interface",
[]string{"container_id", "interface"},
nil,
)
c.PacketsReceived = prometheus.NewDesc(
c.packetsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "network_receive_packets_total"),
"Packets Received on Interface",
[]string{"container_id", "interface"},
nil,
)
c.PacketsSent = prometheus.NewDesc(
c.packetsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "network_transmit_packets_total"),
"Packets Sent on Interface",
[]string{"container_id", "interface"},
nil,
)
c.DroppedPacketsIncoming = prometheus.NewDesc(
c.droppedPacketsIncoming = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "network_receive_packets_dropped_total"),
"Dropped Incoming Packets on Interface",
[]string{"container_id", "interface"},
nil,
)
c.DroppedPacketsOutgoing = prometheus.NewDesc(
c.droppedPacketsOutgoing = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "network_transmit_packets_dropped_total"),
"Dropped Outgoing Packets on Interface",
[]string{"container_id", "interface"},
nil,
)
c.ReadCountNormalized = prometheus.NewDesc(
c.readCountNormalized = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "storage_read_count_normalized_total"),
"Read Count Normalized",
[]string{"container_id"},
nil,
)
c.ReadSizeBytes = prometheus.NewDesc(
c.readSizeBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "storage_read_size_bytes_total"),
"Read Size Bytes",
[]string{"container_id"},
nil,
)
c.WriteCountNormalized = prometheus.NewDesc(
c.writeCountNormalized = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "storage_write_count_normalized_total"),
"Write Count Normalized",
[]string{"container_id"},
nil,
)
c.WriteSizeBytes = prometheus.NewDesc(
c.writeSizeBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "storage_write_size_bytes_total"),
"Write Size Bytes",
[]string{"container_id"},
@@ -191,7 +205,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting collector metrics", "err", err)
return err
@@ -199,15 +213,15 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
return nil
}
// containerClose closes the container resource
func (c *collector) containerClose(container hcsshim.Container) {
// containerClose closes the container resource.
func (c *Collector) containerClose(container hcsshim.Container) {
err := container.Close()
if err != nil {
_ = level.Error(c.logger).Log("err", err)
}
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
// Types Container is passed to get the containers compute systems only
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
if err != nil {
@@ -218,7 +232,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
count := len(containers)
ch <- prometheus.MustNewConstMetric(
c.ContainersCount,
c.containersCount,
prometheus.GaugeValue,
float64(count),
)
@@ -250,67 +264,67 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
containerPrefixes[containerDetails.ID] = containerIdWithPrefix
ch <- prometheus.MustNewConstMetric(
c.ContainerAvailable,
c.containerAvailable,
prometheus.CounterValue,
1,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.UsageCommitBytes,
c.usageCommitBytes,
prometheus.GaugeValue,
float64(cstats.Memory.UsageCommitBytes),
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.UsageCommitPeakBytes,
c.usageCommitPeakBytes,
prometheus.GaugeValue,
float64(cstats.Memory.UsageCommitPeakBytes),
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.UsagePrivateWorkingSetBytes,
c.usagePrivateWorkingSetBytes,
prometheus.GaugeValue,
float64(cstats.Memory.UsagePrivateWorkingSetBytes),
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.RuntimeTotal,
c.runtimeTotal,
prometheus.CounterValue,
float64(cstats.Processor.TotalRuntime100ns)*perflib.TicksToSecondScaleFactor,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.RuntimeUser,
c.runtimeUser,
prometheus.CounterValue,
float64(cstats.Processor.RuntimeUser100ns)*perflib.TicksToSecondScaleFactor,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.RuntimeKernel,
c.runtimeKernel,
prometheus.CounterValue,
float64(cstats.Processor.RuntimeKernel100ns)*perflib.TicksToSecondScaleFactor,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.ReadCountNormalized,
c.readCountNormalized,
prometheus.CounterValue,
float64(cstats.Storage.ReadCountNormalized),
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.ReadSizeBytes,
c.readSizeBytes,
prometheus.CounterValue,
float64(cstats.Storage.ReadSizeBytes),
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.WriteCountNormalized,
c.writeCountNormalized,
prometheus.CounterValue,
float64(cstats.Storage.WriteCountNormalized),
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.WriteSizeBytes,
c.writeSizeBytes,
prometheus.CounterValue,
float64(cstats.Storage.WriteSizeBytes),
containerIdWithPrefix,
@@ -346,38 +360,38 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.BytesReceived,
c.bytesReceived,
prometheus.CounterValue,
float64(endpointStats.BytesReceived),
containerIdWithPrefix, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.BytesSent,
c.bytesSent,
prometheus.CounterValue,
float64(endpointStats.BytesSent),
containerIdWithPrefix, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsReceived,
c.packetsReceived,
prometheus.CounterValue,
float64(endpointStats.PacketsReceived),
containerIdWithPrefix, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsSent,
c.packetsSent,
prometheus.CounterValue,
float64(endpointStats.PacketsSent),
containerIdWithPrefix, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.DroppedPacketsIncoming,
c.droppedPacketsIncoming,
prometheus.CounterValue,
float64(endpointStats.DroppedPacketsIncoming),
containerIdWithPrefix, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.DroppedPacketsOutgoing,
c.droppedPacketsOutgoing,
prometheus.CounterValue,
float64(endpointStats.DroppedPacketsOutgoing),
containerIdWithPrefix, endpointId,


@@ -19,71 +19,82 @@ type Config struct{}
var ConfigDefaults = Config{}
type collector struct {
type Collector struct {
config Config
logger log.Logger
CStateSecondsTotal *prometheus.Desc
TimeTotal *prometheus.Desc
InterruptsTotal *prometheus.Desc
DPCsTotal *prometheus.Desc
ClockInterruptsTotal *prometheus.Desc
IdleBreakEventsTotal *prometheus.Desc
ParkingStatus *prometheus.Desc
ProcessorFrequencyMHz *prometheus.Desc
ProcessorMaxFrequencyMHz *prometheus.Desc
ProcessorPerformance *prometheus.Desc
ProcessorMPerf *prometheus.Desc
ProcessorRTC *prometheus.Desc
ProcessorUtility *prometheus.Desc
ProcessorPrivUtility *prometheus.Desc
cStateSecondsTotal *prometheus.Desc
timeTotal *prometheus.Desc
interruptsTotal *prometheus.Desc
dpcsTotal *prometheus.Desc
clockInterruptsTotal *prometheus.Desc
idleBreakEventsTotal *prometheus.Desc
parkingStatus *prometheus.Desc
processorFrequencyMHz *prometheus.Desc
processorPerformance *prometheus.Desc
processorMPerf *prometheus.Desc
processorRTC *prometheus.Desc
processorUtility *prometheus.Desc
processorPrivilegedUtility *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
if winversion.WindowsVersionFloat > 6.05 {
return []string{"Processor Information"}, nil
}
return []string{"Processor"}, nil
}
func (c *collector) Build() error {
c.CStateSecondsTotal = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.cStateSecondsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cstate_seconds_total"),
"Time spent in low-power idle state",
[]string{"core", "state"},
nil,
)
c.TimeTotal = prometheus.NewDesc(
c.timeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "time_total"),
"Time that processor spent in different modes (dpc, idle, interrupt, privileged, user)",
[]string{"core", "mode"},
nil,
)
c.InterruptsTotal = prometheus.NewDesc(
c.interruptsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "interrupts_total"),
"Total number of received and serviced hardware interrupts",
[]string{"core"},
nil,
)
c.DPCsTotal = prometheus.NewDesc(
c.dpcsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dpcs_total"),
"Total number of received and serviced deferred procedure calls (DPCs)",
[]string{"core"},
@@ -100,79 +111,79 @@ func (c *collector) Build() error {
return nil
}
c.CStateSecondsTotal = prometheus.NewDesc(
c.cStateSecondsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cstate_seconds_total"),
"Time spent in low-power idle state",
[]string{"core", "state"},
nil,
)
c.TimeTotal = prometheus.NewDesc(
c.timeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "time_total"),
"Time that processor spent in different modes (dpc, idle, interrupt, privileged, user)",
[]string{"core", "mode"},
nil,
)
c.InterruptsTotal = prometheus.NewDesc(
c.interruptsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "interrupts_total"),
"Total number of received and serviced hardware interrupts",
[]string{"core"},
nil,
)
c.DPCsTotal = prometheus.NewDesc(
c.dpcsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dpcs_total"),
"Total number of received and serviced deferred procedure calls (DPCs)",
[]string{"core"},
nil,
)
c.ClockInterruptsTotal = prometheus.NewDesc(
c.clockInterruptsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "clock_interrupts_total"),
"Total number of received and serviced clock tick interrupts",
[]string{"core"},
nil,
)
c.IdleBreakEventsTotal = prometheus.NewDesc(
c.idleBreakEventsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "idle_break_events_total"),
"Total number of time processor was woken from idle",
[]string{"core"},
nil,
)
c.ParkingStatus = prometheus.NewDesc(
c.parkingStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "parking_status"),
"Parking Status represents whether a processor is parked or not",
[]string{"core"},
nil,
)
c.ProcessorFrequencyMHz = prometheus.NewDesc(
c.processorFrequencyMHz = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "core_frequency_mhz"),
"Core frequency in megahertz",
[]string{"core"},
nil,
)
c.ProcessorPerformance = prometheus.NewDesc(
c.processorPerformance = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "processor_performance_total"),
"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
[]string{"core"},
nil,
)
c.ProcessorMPerf = prometheus.NewDesc(
c.processorMPerf = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "processor_mperf_total"),
"Processor MPerf is the number of TSC ticks incremented while executing instructions",
[]string{"core"},
nil,
)
c.ProcessorRTC = prometheus.NewDesc(
c.processorRTC = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "processor_rtc_total"),
"Processor RTC represents the number of RTC ticks made since the system booted. It should consistently be 64e6, and can be used to properly derive Processor Utility Rate",
[]string{"core"},
nil,
)
c.ProcessorUtility = prometheus.NewDesc(
c.processorUtility = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "processor_utility_total"),
"Processor Utility represents is the amount of time the core spends executing instructions",
[]string{"core"},
nil,
)
c.ProcessorPrivUtility = prometheus.NewDesc(
c.processorPrivilegedUtility = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "processor_privileged_utility_total"),
"Processor Privileged Utility represents is the amount of time the core has spent executing instructions inside the kernel",
[]string{"core"},
@@ -182,7 +193,7 @@ func (c *collector) Build() error {
return nil
}
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if winversion.WindowsVersionFloat > 6.05 {
return c.CollectFull(ctx, ch)
}
@@ -209,7 +220,7 @@ type perflibProcessor struct {
PercentUserTime float64 `perflib:"% User Time"`
}
func (c *collector) CollectBasic(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) CollectBasic(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
data := make([]perflibProcessor, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Processor"], &data, c.logger)
if err != nil {
@@ -223,63 +234,63 @@ func (c *collector) CollectBasic(ctx *types.ScrapeContext, ch chan<- prometheus.
core := cpu.Name
ch <- prometheus.MustNewConstMetric(
c.CStateSecondsTotal,
c.cStateSecondsTotal,
prometheus.CounterValue,
cpu.PercentC1Time,
core, "c1",
)
ch <- prometheus.MustNewConstMetric(
c.CStateSecondsTotal,
c.cStateSecondsTotal,
prometheus.CounterValue,
cpu.PercentC2Time,
core, "c2",
)
ch <- prometheus.MustNewConstMetric(
c.CStateSecondsTotal,
c.cStateSecondsTotal,
prometheus.CounterValue,
cpu.PercentC3Time,
core, "c3",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.PercentIdleTime,
core, "idle",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.PercentInterruptTime,
core, "interrupt",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.PercentDPCTime,
core, "dpc",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.PercentPrivilegedTime,
core, "privileged",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.PercentUserTime,
core, "user",
)
ch <- prometheus.MustNewConstMetric(
c.InterruptsTotal,
c.interruptsTotal,
prometheus.CounterValue,
cpu.Interrupts,
core,
)
ch <- prometheus.MustNewConstMetric(
c.DPCsTotal,
c.dpcsTotal,
prometheus.CounterValue,
cpu.DPCsQueued,
core,
@@ -318,7 +329,7 @@ type perflibProcessorInformation struct {
UserTimeSeconds float64 `perflib:"% User Time"`
}
func (c *collector) CollectFull(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) CollectFull(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
data := make([]perflibProcessorInformation, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Processor Information"], &data, c.logger)
if err != nil {
@@ -332,119 +343,119 @@ func (c *collector) CollectFull(ctx *types.ScrapeContext, ch chan<- prometheus.M
core := cpu.Name
ch <- prometheus.MustNewConstMetric(
c.CStateSecondsTotal,
c.cStateSecondsTotal,
prometheus.CounterValue,
cpu.C1TimeSeconds,
core, "c1",
)
ch <- prometheus.MustNewConstMetric(
c.CStateSecondsTotal,
c.cStateSecondsTotal,
prometheus.CounterValue,
cpu.C2TimeSeconds,
core, "c2",
)
ch <- prometheus.MustNewConstMetric(
c.CStateSecondsTotal,
c.cStateSecondsTotal,
prometheus.CounterValue,
cpu.C3TimeSeconds,
core, "c3",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.IdleTimeSeconds,
core, "idle",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.InterruptTimeSeconds,
core, "interrupt",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.DPCTimeSeconds,
core, "dpc",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.PrivilegedTimeSeconds,
core, "privileged",
)
ch <- prometheus.MustNewConstMetric(
c.TimeTotal,
c.timeTotal,
prometheus.CounterValue,
cpu.UserTimeSeconds,
core, "user",
)
ch <- prometheus.MustNewConstMetric(
c.InterruptsTotal,
c.interruptsTotal,
prometheus.CounterValue,
cpu.InterruptsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.DPCsTotal,
c.dpcsTotal,
prometheus.CounterValue,
cpu.DPCsQueuedTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ClockInterruptsTotal,
c.clockInterruptsTotal,
prometheus.CounterValue,
cpu.ClockInterruptsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.IdleBreakEventsTotal,
c.idleBreakEventsTotal,
prometheus.CounterValue,
cpu.IdleBreakEventsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ParkingStatus,
c.parkingStatus,
prometheus.GaugeValue,
cpu.ParkingStatus,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorFrequencyMHz,
c.processorFrequencyMHz,
prometheus.GaugeValue,
cpu.ProcessorFrequencyMHz,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorPerformance,
c.processorPerformance,
prometheus.CounterValue,
cpu.ProcessorPerformance,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorMPerf,
c.processorMPerf,
prometheus.CounterValue,
cpu.ProcessorMPerf,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorRTC,
c.processorRTC,
prometheus.CounterValue,
cpu.ProcessorRTC,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorUtility,
c.processorUtility,
prometheus.CounterValue,
cpu.ProcessorUtilityRate,
core,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorPrivUtility,
c.processorPrivilegedUtility,
prometheus.CounterValue,
cpu.PrivilegedUtilitySeconds,
core,


@@ -25,39 +25,52 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for a few WMI metrics in Win32_Processor
type collector struct {
// A Collector is a Prometheus Collector for a few WMI metrics in Win32_Processor.
type Collector struct {
config Config
logger log.Logger
CpuInfo *prometheus.Desc
cpuInfo *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.CpuInfo = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.cpuInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, "", Name),
"Labelled CPU information as provided provided by Win32_Processor",
"Labelled CPU information as provided by Win32_Processor",
[]string{
"architecture",
"device_id",
@@ -85,7 +98,7 @@ type win32_Processor struct {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cpu_info metrics", "err", err)
return err
@@ -93,7 +106,7 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
return nil
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []win32_Processor
// We use a static query here because the provided methods in wmi.go all issue a SELECT *;
// This results in the time-consuming LoadPercentage field being read which seems to measure each CPU
@@ -108,7 +121,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
// Some CPUs end up exposing trailing spaces for certain strings, so clean them up
for _, processor := range dst {
ch <- prometheus.MustNewConstMetric(
c.CpuInfo,
c.cpuInfo,
prometheus.GaugeValue,
1.0,
strconv.Itoa(int(processor.Architecture)),


@@ -0,0 +1,12 @@
package cpu_info_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/cpu_info"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, cpu_info.Name, cpu_info.NewWithFlags)
}


@@ -17,51 +17,64 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI metrics.
type Collector struct {
config Config
logger log.Logger
PhysicalMemoryBytes *prometheus.Desc
LogicalProcessors *prometheus.Desc
Hostname *prometheus.Desc
physicalMemoryBytes *prometheus.Desc
logicalProcessors *prometheus.Desc
hostname *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.LogicalProcessors = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.logicalProcessors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "logical_processors"),
"ComputerSystem.NumberOfLogicalProcessors",
nil,
nil,
)
c.PhysicalMemoryBytes = prometheus.NewDesc(
c.physicalMemoryBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "physical_memory_bytes"),
"ComputerSystem.TotalPhysicalMemory",
nil,
nil,
)
c.Hostname = prometheus.NewDesc(
c.hostname = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "hostname"),
"Labelled system hostname information as provided by ComputerSystem.DNSHostName and ComputerSystem.Domain",
[]string{
@@ -76,7 +89,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cs metrics", "err", err)
return err
@@ -84,7 +97,7 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
return nil
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
// Get systeminfo for number of processors
systemInfo := sysinfoapi.GetSystemInfo()
@@ -95,13 +108,13 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.LogicalProcessors,
c.logicalProcessors,
prometheus.GaugeValue,
float64(systemInfo.NumberOfProcessors),
)
ch <- prometheus.MustNewConstMetric(
c.PhysicalMemoryBytes,
c.physicalMemoryBytes,
prometheus.GaugeValue,
float64(mem.TotalPhys),
)
@@ -120,7 +133,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.Hostname,
c.hostname,
prometheus.GaugeValue,
1.0,
hostname,


@@ -3,89 +3,87 @@
package dfsr
import (
"slices"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "dfsr"
FlagDfsrEnabledCollectors = "collectors.dfsr.sources-enabled"
)
const Name = "dfsr"
type Config struct {
DfsrEnabledCollectors string `yaml:"enabled_collectors"`
CollectorsEnabled []string `yaml:"collectors_enabled"`
}
var ConfigDefaults = Config{
DfsrEnabledCollectors: "connection,folder,volume",
CollectorsEnabled: []string{"connection", "folder", "volume"},
}
// collector contains the metric and state data of the DFSR collectors.
type collector struct {
// Collector contains the metric and state data of the DFSR collectors.
type Collector struct {
config Config
logger log.Logger
dfsrEnabledCollectors *string
// connection source
connectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
connectionBytesReceivedTotal *prometheus.Desc
connectionCompressedSizeOfFilesReceivedTotal *prometheus.Desc
connectionFilesReceivedTotal *prometheus.Desc
connectionRDCBytesReceivedTotal *prometheus.Desc
connectionRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
connectionRDCSizeOfFilesReceivedTotal *prometheus.Desc
connectionRDCNumberofFilesReceivedTotal *prometheus.Desc
connectionSizeOfFilesReceivedTotal *prometheus.Desc
// Connection source
ConnectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
ConnectionBytesReceivedTotal *prometheus.Desc
ConnectionCompressedSizeOfFilesReceivedTotal *prometheus.Desc
ConnectionFilesReceivedTotal *prometheus.Desc
ConnectionRDCBytesReceivedTotal *prometheus.Desc
ConnectionRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
ConnectionRDCSizeOfFilesReceivedTotal *prometheus.Desc
ConnectionRDCNumberofFilesReceivedTotal *prometheus.Desc
ConnectionSizeOfFilesReceivedTotal *prometheus.Desc
// folder source
folderBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
folderCompressedSizeOfFilesReceivedTotal *prometheus.Desc
folderConflictBytesCleanedupTotal *prometheus.Desc
folderConflictBytesGeneratedTotal *prometheus.Desc
folderConflictFilesCleanedUpTotal *prometheus.Desc
folderConflictFilesGeneratedTotal *prometheus.Desc
folderConflictfolderCleanupsCompletedTotal *prometheus.Desc
folderConflictSpaceInUse *prometheus.Desc
folderDeletedSpaceInUse *prometheus.Desc
folderDeletedBytesCleanedUpTotal *prometheus.Desc
folderDeletedBytesGeneratedTotal *prometheus.Desc
folderDeletedFilesCleanedUpTotal *prometheus.Desc
folderDeletedFilesGeneratedTotal *prometheus.Desc
folderFileInstallsRetriedTotal *prometheus.Desc
folderFileInstallsSucceededTotal *prometheus.Desc
folderFilesReceivedTotal *prometheus.Desc
folderRDCBytesReceivedTotal *prometheus.Desc
folderRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
folderRDCNumberofFilesReceivedTotal *prometheus.Desc
folderRDCSizeOfFilesReceivedTotal *prometheus.Desc
folderSizeOfFilesReceivedTotal *prometheus.Desc
folderStagingSpaceInUse *prometheus.Desc
folderStagingBytesCleanedUpTotal *prometheus.Desc
folderStagingBytesGeneratedTotal *prometheus.Desc
folderStagingFilesCleanedUpTotal *prometheus.Desc
folderStagingFilesGeneratedTotal *prometheus.Desc
folderUpdatesDroppedTotal *prometheus.Desc
// Folder source
FolderBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
FolderCompressedSizeOfFilesReceivedTotal *prometheus.Desc
FolderConflictBytesCleanedupTotal *prometheus.Desc
FolderConflictBytesGeneratedTotal *prometheus.Desc
FolderConflictFilesCleanedUpTotal *prometheus.Desc
FolderConflictFilesGeneratedTotal *prometheus.Desc
FolderConflictFolderCleanupsCompletedTotal *prometheus.Desc
FolderConflictSpaceInUse *prometheus.Desc
FolderDeletedSpaceInUse *prometheus.Desc
FolderDeletedBytesCleanedUpTotal *prometheus.Desc
FolderDeletedBytesGeneratedTotal *prometheus.Desc
FolderDeletedFilesCleanedUpTotal *prometheus.Desc
FolderDeletedFilesGeneratedTotal *prometheus.Desc
FolderFileInstallsRetriedTotal *prometheus.Desc
FolderFileInstallsSucceededTotal *prometheus.Desc
FolderFilesReceivedTotal *prometheus.Desc
FolderRDCBytesReceivedTotal *prometheus.Desc
FolderRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
FolderRDCNumberofFilesReceivedTotal *prometheus.Desc
FolderRDCSizeOfFilesReceivedTotal *prometheus.Desc
FolderSizeOfFilesReceivedTotal *prometheus.Desc
FolderStagingSpaceInUse *prometheus.Desc
FolderStagingBytesCleanedUpTotal *prometheus.Desc
FolderStagingBytesGeneratedTotal *prometheus.Desc
FolderStagingFilesCleanedUpTotal *prometheus.Desc
FolderStagingFilesGeneratedTotal *prometheus.Desc
FolderUpdatesDroppedTotal *prometheus.Desc
// volume source
volumeDatabaseLookupsTotal *prometheus.Desc
volumeDatabaseCommitsTotal *prometheus.Desc
volumeUSNJournalUnreadPercentage *prometheus.Desc
volumeUSNJournalRecordsAcceptedTotal *prometheus.Desc
volumeUSNJournalRecordsReadTotal *prometheus.Desc
// Volume source
VolumeDatabaseLookupsTotal *prometheus.Desc
VolumeDatabaseCommitsTotal *prometheus.Desc
VolumeUSNJournalUnreadPercentage *prometheus.Desc
VolumeUSNJournalRecordsAcceptedTotal *prometheus.Desc
VolumeUSNJournalRecordsReadTotal *prometheus.Desc
// Map of child collector functions used during collection
// Map of child Collector functions used during collection
dfsrChildCollectors []dfsrCollectorFunc
}
type dfsrCollectorFunc func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error
// Map Perflib sources to DFSR collector names
// e.g, volume -> DFS Replication Service Volumes
// Map Perflib sources to DFSR Collector names
// e.g., volume -> DFS Replication Service Volumes.
func dfsrGetPerfObjectName(collector string) string {
prefix := "DFS "
suffix := ""
@@ -100,38 +98,56 @@ func dfsrGetPerfObjectName(collector string) string {
return prefix + suffix
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
dfsrEnabledCollectors: &config.DfsrEnabledCollectors,
if config.CollectorsEnabled == nil {
config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
return &collector{
dfsrEnabledCollectors: app.
Flag(FlagDfsrEnabledCollectors, "Comma-seperated list of DFSR Perflib sources to use.").Default("connection,folder,volume").
Default(ConfigDefaults.DfsrEnabledCollectors).String(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
var collectorsEnabled string
app.Flag("collectors.dfsr.sources-enabled", "Comma-separated list of DFSR Perflib sources to use.").
Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
app.Action(func(*kingpin.ParseContext) error {
c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")
return nil
})
return c
}
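The Action callback is what moves the parsed flag value into the config slice after kingpin finishes parsing. A small sketch of that flow; the application name and the flag value are made up, and the resulting field is only described in a comment because it is unexported:

```go
package main

import (
	"github.com/alecthomas/kingpin/v2"

	"github.com/prometheus-community/windows_exporter/pkg/collector/dfsr"
)

func main() {
	app := kingpin.New("windows_exporter", "")

	// Registers --collectors.dfsr.sources-enabled plus an Action that splits its value.
	c := dfsr.NewWithFlags(app)

	// After Parse the Action has run, so c internally holds
	// CollectorsEnabled = []string{"connection", "volume"}.
	kingpin.MustParse(app.Parse([]string{"--collectors.dfsr.sources-enabled=connection,volume"}))

	_ = c
}
```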
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
// Perflib sources are dynamic, depending on the enabled child collectors
expandedChildCollectors := utils.ExpandEnabledChildCollectors(*c.dfsrEnabledCollectors)
expandedChildCollectors := slices.Compact(c.config.CollectorsEnabled)
perflibDependencies := make([]string, 0, len(expandedChildCollectors))
for _, source := range expandedChildCollectors {
perflibDependencies = append(perflibDependencies, dfsrGetPerfObjectName(source))
}
@@ -139,307 +155,313 @@ func (c *collector) GetPerfCounter() ([]string, error) {
return perflibDependencies, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
_ = level.Info(c.logger).Log("msg", "dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
// Connection
c.ConnectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
// connection
c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
"Total bytes of bandwidth saved using DFS Replication for this connection",
[]string{"name"},
nil,
)
c.ConnectionBytesReceivedTotal = prometheus.NewDesc(
c.connectionBytesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_bytes_received_total"),
"Total bytes received for connection",
[]string{"name"},
nil,
)
c.ConnectionCompressedSizeOfFilesReceivedTotal = prometheus.NewDesc(
c.connectionCompressedSizeOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_compressed_size_of_files_received_bytes_total"),
"Total compressed size of files received on the connection, in bytes",
[]string{"name"},
nil,
)
c.ConnectionFilesReceivedTotal = prometheus.NewDesc(
c.connectionFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_received_files_total"),
"Total number of files received for connection",
[]string{"name"},
nil,
)
c.ConnectionRDCBytesReceivedTotal = prometheus.NewDesc(
c.connectionRDCBytesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_rdc_received_bytes_total"),
"Total bytes received on the connection while replicating files using Remote Differential Compression",
[]string{"name"},
nil,
)
c.ConnectionRDCCompressedSizeOfFilesReceivedTotal = prometheus.NewDesc(
c.connectionRDCCompressedSizeOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_rdc_compressed_size_of_received_files_bytes_total"),
"Total uncompressed size of files received with Remote Differential Compression for connection",
[]string{"name"},
nil,
)
c.ConnectionRDCNumberofFilesReceivedTotal = prometheus.NewDesc(
c.connectionRDCNumberofFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_rdc_received_files_total"),
"Total number of files received using remote differential compression",
[]string{"name"},
nil,
)
c.ConnectionRDCSizeOfFilesReceivedTotal = prometheus.NewDesc(
c.connectionRDCSizeOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_rdc_size_of_received_files_bytes_total"),
"Total size of received Remote Differential Compression files, in bytes.",
[]string{"name"},
nil,
)
c.ConnectionSizeOfFilesReceivedTotal = prometheus.NewDesc(
c.connectionSizeOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_files_received_bytes_total"),
"Total size of files received, in bytes",
[]string{"name"},
nil,
)
c. // Folder
FolderBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
// folder
c.folderBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_bandwidth_savings_using_dfs_replication_bytes_total"),
"Total bytes of bandwidth saved using DFS Replication for this folder",
[]string{"name"},
nil,
)
c.FolderCompressedSizeOfFilesReceivedTotal = prometheus.NewDesc(
c.folderCompressedSizeOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_compressed_size_of_received_files_bytes_total"),
"Total compressed size of files received on the folder, in bytes",
[]string{"name"},
nil,
)
c.FolderConflictBytesCleanedupTotal = prometheus.NewDesc(
c.folderConflictBytesCleanedupTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_cleaned_up_bytes_total"),
"Total size of conflict loser files and folders deleted from the Conflict and Deleted folder, in bytes",
[]string{"name"},
nil,
)
c.FolderConflictBytesGeneratedTotal = prometheus.NewDesc(
c.folderConflictBytesGeneratedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_generated_bytes_total"),
"Total size of conflict loser files and folders moved to the Conflict and Deleted folder, in bytes",
[]string{"name"},
nil,
)
c.FolderConflictFilesCleanedUpTotal = prometheus.NewDesc(
c.folderConflictFilesCleanedUpTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_cleaned_up_files_total"),
"Number of conflict loser files deleted from the Conflict and Deleted folder",
[]string{"name"},
nil,
)
c.FolderConflictFilesGeneratedTotal = prometheus.NewDesc(
c.folderConflictFilesGeneratedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_generated_files_total"),
"Number of files and folders moved to the Conflict and Deleted folder",
[]string{"name"},
nil,
)
c.FolderConflictFolderCleanupsCompletedTotal = prometheus.NewDesc(
c.folderConflictfolderCleanupsCompletedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_folder_cleanups_total"),
"Number of deletions of conflict loser files and folders in the Conflict and Deleted",
[]string{"name"},
nil,
)
c.FolderConflictSpaceInUse = prometheus.NewDesc(
c.folderConflictSpaceInUse = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_conflict_space_in_use_bytes"),
"Total size of the conflict loser files and folders currently in the Conflict and Deleted folder",
[]string{"name"},
nil,
)
c.FolderDeletedSpaceInUse = prometheus.NewDesc(
c.folderDeletedSpaceInUse = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_deleted_space_in_use_bytes"),
"Total size (in bytes) of the deleted files and folders currently in the Conflict and Deleted folder",
[]string{"name"},
nil,
)
c.FolderDeletedBytesCleanedUpTotal = prometheus.NewDesc(
c.folderDeletedBytesCleanedUpTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_deleted_cleaned_up_bytes_total"),
"Total size (in bytes) of replicating deleted files and folders that were cleaned up from the Conflict and Deleted folder",
[]string{"name"},
nil,
)
c.FolderDeletedBytesGeneratedTotal = prometheus.NewDesc(
c.folderDeletedBytesGeneratedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_deleted_generated_bytes_total"),
"Total size (in bytes) of replicated deleted files and folders that were moved to the Conflict and Deleted folder after they were deleted from a replicated folder on a sending member",
[]string{"name"},
nil,
)
c.FolderDeletedFilesCleanedUpTotal = prometheus.NewDesc(
c.folderDeletedFilesCleanedUpTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_deleted_cleaned_up_files_total"),
"Number of files and folders that were cleaned up from the Conflict and Deleted folder",
[]string{"name"},
nil,
)
c.FolderDeletedFilesGeneratedTotal = prometheus.NewDesc(
c.folderDeletedFilesGeneratedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_deleted_generated_files_total"),
"Number of deleted files and folders that were moved to the Conflict and Deleted folder",
[]string{"name"},
nil,
)
c.FolderFileInstallsRetriedTotal = prometheus.NewDesc(
c.folderFileInstallsRetriedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_file_installs_retried_total"),
"Total number of file installs that are being retried due to sharing violations or other errors encountered when installing the files",
[]string{"name"},
nil,
)
c.FolderFileInstallsSucceededTotal = prometheus.NewDesc(
c.folderFileInstallsSucceededTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_file_installs_succeeded_total"),
"Total number of files that were successfully received from sending members and installed locally on this server",
[]string{"name"},
nil,
)
c.FolderFilesReceivedTotal = prometheus.NewDesc(
c.folderFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_received_files_total"),
"Total number of files received",
[]string{"name"},
nil,
)
c.FolderRDCBytesReceivedTotal = prometheus.NewDesc(
c.folderRDCBytesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_rdc_received_bytes_total"),
"Total number of bytes received in replicating files using Remote Differential Compression",
[]string{"name"},
nil,
)
c.FolderRDCCompressedSizeOfFilesReceivedTotal = prometheus.NewDesc(
c.folderRDCCompressedSizeOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_rdc_compressed_size_of_received_files_bytes_total"),
"Total compressed size (in bytes) of the files received with Remote Differential Compression",
[]string{"name"},
nil,
)
c.FolderRDCNumberofFilesReceivedTotal = prometheus.NewDesc(
c.folderRDCNumberofFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_rdc_received_files_total"),
"Total number of files received with Remote Differential Compression",
[]string{"name"},
nil,
)
c.FolderRDCSizeOfFilesReceivedTotal = prometheus.NewDesc(
c.folderRDCSizeOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_rdc_files_received_bytes_total"),
"Total uncompressed size (in bytes) of the files received with Remote Differential Compression",
[]string{"name"},
nil,
)
c.FolderSizeOfFilesReceivedTotal = prometheus.NewDesc(
c.folderSizeOfFilesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_files_received_bytes_total"),
"Total uncompressed size (in bytes) of the files received",
[]string{"name"},
nil,
)
c.FolderStagingSpaceInUse = prometheus.NewDesc(
c.folderStagingSpaceInUse = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_staging_space_in_use_bytes"),
"Total size of files and folders currently in the staging folder.",
[]string{"name"},
nil,
)
c.FolderStagingBytesCleanedUpTotal = prometheus.NewDesc(
c.folderStagingBytesCleanedUpTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_staging_cleaned_up_bytes_total"),
"Total size (in bytes) of the files and folders that have been cleaned up from the staging folder",
[]string{"name"},
nil,
)
c.FolderStagingBytesGeneratedTotal = prometheus.NewDesc(
c.folderStagingBytesGeneratedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_staging_generated_bytes_total"),
"Total size (in bytes) of replicated files and folders in the staging folder created by the DFS Replication service since last restart",
[]string{"name"},
nil,
)
c.FolderStagingFilesCleanedUpTotal = prometheus.NewDesc(
c.folderStagingFilesCleanedUpTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_staging_cleaned_up_files_total"),
"Total number of files and folders that have been cleaned up from the staging folder",
[]string{"name"},
nil,
)
c.FolderStagingFilesGeneratedTotal = prometheus.NewDesc(
c.folderStagingFilesGeneratedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_staging_generated_files_total"),
"Total number of times replicated files and folders have been staged by the DFS Replication service",
[]string{"name"},
nil,
)
c.FolderUpdatesDroppedTotal = prometheus.NewDesc(
c.folderUpdatesDroppedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "folder_dropped_updates_total"),
"Total number of redundant file replication update records that have been ignored by the DFS Replication service because they did not change the replicated file or folder",
[]string{"name"},
nil,
)
c. // Volume
VolumeDatabaseCommitsTotal = prometheus.NewDesc(
// volume
c.volumeDatabaseCommitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "volume_database_commits_total"),
"Total number of DFSR Volume database commits",
"Total number of DFSR volume database commits",
[]string{"name"},
nil,
)
c.VolumeDatabaseLookupsTotal = prometheus.NewDesc(
c.volumeDatabaseLookupsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "volume_database_lookups_total"),
"Total number of DFSR Volume database lookups",
"Total number of DFSR volume database lookups",
[]string{"name"},
nil,
)
c.VolumeUSNJournalUnreadPercentage = prometheus.NewDesc(
c.volumeUSNJournalUnreadPercentage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "volume_usn_journal_unread_percentage"),
"Percentage of DFSR Volume USN journal records that are unread",
"Percentage of DFSR volume USN journal records that are unread",
[]string{"name"},
nil,
)
c.VolumeUSNJournalRecordsAcceptedTotal = prometheus.NewDesc(
c.volumeUSNJournalRecordsAcceptedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "volume_usn_journal_accepted_records_total"),
"Total number of USN journal records accepted",
[]string{"name"},
nil,
)
c.VolumeUSNJournalRecordsReadTotal = prometheus.NewDesc(
c.volumeUSNJournalRecordsReadTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "volume_usn_journal_read_records_total"),
"Total number of DFSR Volume USN journal records read",
"Total number of DFSR volume USN journal records read",
[]string{"name"},
nil,
)
c.dfsrChildCollectors = c.getDFSRChildCollectors(utils.ExpandEnabledChildCollectors(*c.dfsrEnabledCollectors))
// Perflib sources are dynamic, depending on the enabled child collectors
expandedChildCollectors := slices.Compact(c.config.CollectorsEnabled)
c.dfsrChildCollectors = c.getDFSRChildCollectors(expandedChildCollectors)
return nil
}
// Maps enabled child collector names to their relevant collection function,
// for use in collector.Collect()
func (c *collector) getDFSRChildCollectors(enabledCollectors []string) []dfsrCollectorFunc {
// for use in Collector.Collect().
func (c *Collector) getDFSRChildCollectors(enabledCollectors []string) []dfsrCollectorFunc {
var dfsrCollectors []dfsrCollectorFunc
for _, collector := range enabledCollectors {
switch collector {
@@ -457,7 +479,7 @@ func (c *collector) getDFSRChildCollectors(enabledCollectors []string) []dfsrCol
// Collect implements the Collector interface.
// Sends metric values for each metric to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
for _, fn := range c.dfsrChildCollectors {
err := fn(ctx, ch)
if err != nil {
@@ -467,7 +489,7 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
return nil
}
// PerflibDFSRConnection Perflib: "DFS Replication Service Connections"
// PerflibDFSRConnection Perflib: "DFS Replication Service Connections".
type PerflibDFSRConnection struct {
Name string
@@ -482,7 +504,7 @@ type PerflibDFSRConnection struct {
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
}
func (c *collector) collectConnection(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectConnection(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRConnection
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replication Connections"], &dst, c.logger); err != nil {
return err
@@ -490,74 +512,73 @@ func (c *collector) collectConnection(ctx *types.ScrapeContext, ch chan<- promet
for _, connection := range dst {
ch <- prometheus.MustNewConstMetric(
c.ConnectionBandwidthSavingsUsingDFSReplicationTotal,
c.connectionBandwidthSavingsUsingDFSReplicationTotal,
prometheus.CounterValue,
connection.BandwidthSavingsUsingDFSReplicationTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionBytesReceivedTotal,
c.connectionBytesReceivedTotal,
prometheus.CounterValue,
connection.BytesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionCompressedSizeOfFilesReceivedTotal,
c.connectionCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.CompressedSizeOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionFilesReceivedTotal,
c.connectionFilesReceivedTotal,
prometheus.CounterValue,
connection.FilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionRDCBytesReceivedTotal,
c.connectionRDCBytesReceivedTotal,
prometheus.CounterValue,
connection.RDCBytesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionRDCCompressedSizeOfFilesReceivedTotal,
c.connectionRDCCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.RDCCompressedSizeOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionRDCSizeOfFilesReceivedTotal,
c.connectionRDCSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.RDCSizeOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionRDCNumberofFilesReceivedTotal,
c.connectionRDCNumberofFilesReceivedTotal,
prometheus.CounterValue,
connection.RDCNumberofFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionSizeOfFilesReceivedTotal,
c.connectionSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.SizeOfFilesReceivedTotal,
connection.Name,
)
}
return nil
}
// PerflibDFSRFolder Perflib: "DFS Replicated Folder"
type PerflibDFSRFolder struct {
// perflibDFSRFolder Perflib: "DFS Replicated Folder".
type perflibDFSRFolder struct {
Name string
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
@@ -566,7 +587,7 @@ type PerflibDFSRFolder struct {
ConflictBytesGeneratedTotal float64 `perflib:"Conflict Bytes Generated"`
ConflictFilesCleanedUpTotal float64 `perflib:"Conflict Files Cleaned Up"`
ConflictFilesGeneratedTotal float64 `perflib:"Conflict Files Generated"`
ConflictFolderCleanupsCompletedTotal float64 `perflib:"Conflict Folder Cleanups Completed"`
ConflictFolderCleanupsCompletedTotal float64 `perflib:"Conflict folder Cleanups Completed"`
ConflictSpaceInUse float64 `perflib:"Conflict Space In Use"`
DeletedSpaceInUse float64 `perflib:"Deleted Space In Use"`
DeletedBytesCleanedUpTotal float64 `perflib:"Deleted Bytes Cleaned Up"`
@@ -589,197 +610,197 @@ type PerflibDFSRFolder struct {
UpdatesDroppedTotal float64 `perflib:"Updates Dropped"`
}
func (c *collector) collectFolder(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRFolder
func (c *Collector) collectFolder(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []perflibDFSRFolder
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replicated Folders"], &dst, c.logger); err != nil {
return err
}
for _, folder := range dst {
ch <- prometheus.MustNewConstMetric(
c.FolderBandwidthSavingsUsingDFSReplicationTotal,
c.folderBandwidthSavingsUsingDFSReplicationTotal,
prometheus.CounterValue,
folder.BandwidthSavingsUsingDFSReplicationTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderCompressedSizeOfFilesReceivedTotal,
c.folderCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.CompressedSizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictBytesCleanedupTotal,
c.folderConflictBytesCleanedupTotal,
prometheus.CounterValue,
folder.ConflictBytesCleanedupTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictBytesGeneratedTotal,
c.folderConflictBytesGeneratedTotal,
prometheus.CounterValue,
folder.ConflictBytesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictFilesCleanedUpTotal,
c.folderConflictFilesCleanedUpTotal,
prometheus.CounterValue,
folder.ConflictFilesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictFilesGeneratedTotal,
c.folderConflictFilesGeneratedTotal,
prometheus.CounterValue,
folder.ConflictFilesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictFolderCleanupsCompletedTotal,
c.folderConflictfolderCleanupsCompletedTotal,
prometheus.CounterValue,
folder.ConflictFolderCleanupsCompletedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderConflictSpaceInUse,
c.folderConflictSpaceInUse,
prometheus.GaugeValue,
folder.ConflictSpaceInUse,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedSpaceInUse,
c.folderDeletedSpaceInUse,
prometheus.GaugeValue,
folder.DeletedSpaceInUse,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedBytesCleanedUpTotal,
c.folderDeletedBytesCleanedUpTotal,
prometheus.CounterValue,
folder.DeletedBytesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedBytesGeneratedTotal,
c.folderDeletedBytesGeneratedTotal,
prometheus.CounterValue,
folder.DeletedBytesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedFilesCleanedUpTotal,
c.folderDeletedFilesCleanedUpTotal,
prometheus.CounterValue,
folder.DeletedFilesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderDeletedFilesGeneratedTotal,
c.folderDeletedFilesGeneratedTotal,
prometheus.CounterValue,
folder.DeletedFilesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderFileInstallsRetriedTotal,
c.folderFileInstallsRetriedTotal,
prometheus.CounterValue,
folder.FileInstallsRetriedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderFileInstallsSucceededTotal,
c.folderFileInstallsSucceededTotal,
prometheus.CounterValue,
folder.FileInstallsSucceededTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderFilesReceivedTotal,
c.folderFilesReceivedTotal,
prometheus.CounterValue,
folder.FilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderRDCBytesReceivedTotal,
c.folderRDCBytesReceivedTotal,
prometheus.CounterValue,
folder.RDCBytesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderRDCCompressedSizeOfFilesReceivedTotal,
c.folderRDCCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.RDCCompressedSizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderRDCNumberofFilesReceivedTotal,
c.folderRDCNumberofFilesReceivedTotal,
prometheus.CounterValue,
folder.RDCNumberofFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderRDCSizeOfFilesReceivedTotal,
c.folderRDCSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.RDCSizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderSizeOfFilesReceivedTotal,
c.folderSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.SizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingSpaceInUse,
c.folderStagingSpaceInUse,
prometheus.GaugeValue,
folder.StagingSpaceInUse,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingBytesCleanedUpTotal,
c.folderStagingBytesCleanedUpTotal,
prometheus.CounterValue,
folder.StagingBytesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingBytesGeneratedTotal,
c.folderStagingBytesGeneratedTotal,
prometheus.CounterValue,
folder.StagingBytesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingFilesCleanedUpTotal,
c.folderStagingFilesCleanedUpTotal,
prometheus.CounterValue,
folder.StagingFilesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderStagingFilesGeneratedTotal,
c.folderStagingFilesGeneratedTotal,
prometheus.CounterValue,
folder.StagingFilesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FolderUpdatesDroppedTotal,
c.folderUpdatesDroppedTotal,
prometheus.CounterValue,
folder.UpdatesDroppedTotal,
folder.Name,
@@ -788,8 +809,8 @@ func (c *collector) collectFolder(ctx *types.ScrapeContext, ch chan<- prometheus
return nil
}
// PerflibDFSRVolume Perflib: "DFS Replication Service Volumes"
type PerflibDFSRVolume struct {
// perflibDFSRVolume Perflib: "DFS Replication Service Volumes".
type perflibDFSRVolume struct {
Name string
DatabaseCommitsTotal float64 `perflib:"Database Commits"`
@@ -799,48 +820,47 @@ type PerflibDFSRVolume struct {
USNJournalUnreadPercentage float64 `perflib:"USN Journal Records Unread Percentage"`
}
func (c *collector) collectVolume(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRVolume
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replication Service Volumes"], &dst, c.logger); err != nil {
func (c *Collector) collectVolume(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []perflibDFSRVolume
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replication Service volumes"], &dst, c.logger); err != nil {
return err
}
for _, volume := range dst {
ch <- prometheus.MustNewConstMetric(
c.VolumeDatabaseLookupsTotal,
c.volumeDatabaseLookupsTotal,
prometheus.CounterValue,
volume.DatabaseLookupsTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VolumeDatabaseCommitsTotal,
c.volumeDatabaseCommitsTotal,
prometheus.CounterValue,
volume.DatabaseCommitsTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VolumeUSNJournalRecordsAcceptedTotal,
c.volumeUSNJournalRecordsAcceptedTotal,
prometheus.CounterValue,
volume.USNJournalRecordsAcceptedTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VolumeUSNJournalRecordsReadTotal,
c.volumeUSNJournalRecordsReadTotal,
prometheus.CounterValue,
volume.USNJournalRecordsReadTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VolumeUSNJournalUnreadPercentage,
c.volumeUSNJournalUnreadPercentage,
prometheus.GaugeValue,
volume.USNJournalUnreadPercentage,
volume.Name,
)
}
return nil
}

View File

@@ -16,205 +16,218 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for perflib DHCP metrics
type collector struct {
// A Collector is a Prometheus Collector for perflib DHCP metrics.
type Collector struct {
config Config
logger log.Logger
PacketsReceivedTotal *prometheus.Desc
DuplicatesDroppedTotal *prometheus.Desc
PacketsExpiredTotal *prometheus.Desc
ActiveQueueLength *prometheus.Desc
ConflictCheckQueueLength *prometheus.Desc
DiscoversTotal *prometheus.Desc
OffersTotal *prometheus.Desc
RequestsTotal *prometheus.Desc
InformsTotal *prometheus.Desc
AcksTotal *prometheus.Desc
NacksTotal *prometheus.Desc
DeclinesTotal *prometheus.Desc
ReleasesTotal *prometheus.Desc
OfferQueueLength *prometheus.Desc
DeniedDueToMatch *prometheus.Desc
DeniedDueToNonMatch *prometheus.Desc
FailoverBndupdSentTotal *prometheus.Desc
FailoverBndupdReceivedTotal *prometheus.Desc
FailoverBndackSentTotal *prometheus.Desc
FailoverBndackReceivedTotal *prometheus.Desc
FailoverBndupdPendingOutboundQueue *prometheus.Desc
FailoverTransitionsCommunicationinterruptedState *prometheus.Desc
FailoverTransitionsPartnerdownState *prometheus.Desc
FailoverTransitionsRecoverState *prometheus.Desc
FailoverBndupdDropped *prometheus.Desc
acksTotal *prometheus.Desc
activeQueueLength *prometheus.Desc
conflictCheckQueueLength *prometheus.Desc
declinesTotal *prometheus.Desc
deniedDueToMatch *prometheus.Desc
deniedDueToNonMatch *prometheus.Desc
discoversTotal *prometheus.Desc
duplicatesDroppedTotal *prometheus.Desc
failoverBndackReceivedTotal *prometheus.Desc
failoverBndackSentTotal *prometheus.Desc
failoverBndupdDropped *prometheus.Desc
failoverBndupdPendingOutboundQueue *prometheus.Desc
failoverBndupdReceivedTotal *prometheus.Desc
failoverBndupdSentTotal *prometheus.Desc
failoverTransitionsCommunicationInterruptedState *prometheus.Desc
failoverTransitionsPartnerDownState *prometheus.Desc
failoverTransitionsRecoverState *prometheus.Desc
informsTotal *prometheus.Desc
nACKsTotal *prometheus.Desc
offerQueueLength *prometheus.Desc
offersTotal *prometheus.Desc
packetsExpiredTotal *prometheus.Desc
packetsReceivedTotal *prometheus.Desc
releasesTotal *prometheus.Desc
requestsTotal *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"DHCP Server"}, nil
}
func (c *collector) Build() error {
c.PacketsReceivedTotal = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.packetsReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
"Total number of packets received by the DHCP server (PacketsReceivedTotal)",
nil,
nil,
)
c.DuplicatesDroppedTotal = prometheus.NewDesc(
c.duplicatesDroppedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "duplicates_dropped_total"),
"Total number of duplicate packets received by the DHCP server (DuplicatesDroppedTotal)",
nil,
nil,
)
c.PacketsExpiredTotal = prometheus.NewDesc(
c.packetsExpiredTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_expired_total"),
"Total number of packets expired in the DHCP server message queue (PacketsExpiredTotal)",
nil,
nil,
)
c.ActiveQueueLength = prometheus.NewDesc(
c.activeQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "active_queue_length"),
"Number of packets in the processing queue of the DHCP server (ActiveQueueLength)",
nil,
nil,
)
c.ConflictCheckQueueLength = prometheus.NewDesc(
c.conflictCheckQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "conflict_check_queue_length"),
"Number of packets in the DHCP server queue waiting on conflict detection (ping). (ConflictCheckQueueLength)",
nil,
nil,
)
c.DiscoversTotal = prometheus.NewDesc(
c.discoversTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "discovers_total"),
"Total DHCP Discovers received by the DHCP server (DiscoversTotal)",
nil,
nil,
)
c.OffersTotal = prometheus.NewDesc(
c.offersTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "offers_total"),
"Total DHCP Offers sent by the DHCP server (OffersTotal)",
nil,
nil,
)
c.RequestsTotal = prometheus.NewDesc(
c.requestsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
"Total DHCP Requests received by the DHCP server (RequestsTotal)",
nil,
nil,
)
c.InformsTotal = prometheus.NewDesc(
c.informsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "informs_total"),
"Total DHCP Informs received by the DHCP server (InformsTotal)",
nil,
nil,
)
c.AcksTotal = prometheus.NewDesc(
c.acksTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "acks_total"),
"Total DHCP Acks sent by the DHCP server (AcksTotal)",
nil,
nil,
)
c.NacksTotal = prometheus.NewDesc(
c.nACKsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "nacks_total"),
"Total DHCP Nacks sent by the DHCP server (NacksTotal)",
nil,
nil,
)
c.DeclinesTotal = prometheus.NewDesc(
c.declinesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "declines_total"),
"Total DHCP Declines received by the DHCP server (DeclinesTotal)",
nil,
nil,
)
c.ReleasesTotal = prometheus.NewDesc(
c.releasesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "releases_total"),
"Total DHCP Releases received by the DHCP server (ReleasesTotal)",
nil,
nil,
)
c.OfferQueueLength = prometheus.NewDesc(
c.offerQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "offer_queue_length"),
"Number of packets in the offer queue of the DHCP server (OfferQueueLength)",
nil,
nil,
)
c.DeniedDueToMatch = prometheus.NewDesc(
c.deniedDueToMatch = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "denied_due_to_match_total"),
"Total number of DHCP requests denied, based on matches from the Deny list (DeniedDueToMatch)",
nil,
nil,
)
c.DeniedDueToNonMatch = prometheus.NewDesc(
c.deniedDueToNonMatch = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "denied_due_to_nonmatch_total"),
"Total number of DHCP requests denied, based on non-matches from the Allow list (DeniedDueToNonMatch)",
nil,
nil,
)
c.FailoverBndupdSentTotal = prometheus.NewDesc(
c.failoverBndupdSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_sent_total"),
"Number of DHCP fail over Binding Update messages sent (FailoverBndupdSentTotal)",
nil,
nil,
)
c.FailoverBndupdReceivedTotal = prometheus.NewDesc(
c.failoverBndupdReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_received_total"),
"Number of DHCP fail over Binding Update messages received (FailoverBndupdReceivedTotal)",
nil,
nil,
)
c.FailoverBndackSentTotal = prometheus.NewDesc(
c.failoverBndackSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_sent_total"),
"Number of DHCP fail over Binding Ack messages sent (FailoverBndackSentTotal)",
nil,
nil,
)
c.FailoverBndackReceivedTotal = prometheus.NewDesc(
c.failoverBndackReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_received_total"),
"Number of DHCP fail over Binding Ack messages received (FailoverBndackReceivedTotal)",
nil,
nil,
)
c.FailoverBndupdPendingOutboundQueue = prometheus.NewDesc(
c.failoverBndupdPendingOutboundQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_pending_in_outbound_queue"),
"Number of pending outbound DHCP fail over Binding Update messages (FailoverBndupdPendingOutboundQueue)",
nil,
nil,
)
c.FailoverTransitionsCommunicationinterruptedState = prometheus.NewDesc(
c.failoverTransitionsCommunicationInterruptedState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_communicationinterrupted_state_total"),
"Total number of transitions into COMMUNICATION INTERRUPTED state (FailoverTransitionsCommunicationinterruptedState)",
nil,
nil,
)
c.FailoverTransitionsPartnerdownState = prometheus.NewDesc(
c.failoverTransitionsPartnerDownState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_partnerdown_state_total"),
"Total number of transitions into PARTNER DOWN state (FailoverTransitionsPartnerdownState)",
nil,
nil,
)
c.FailoverTransitionsRecoverState = prometheus.NewDesc(
c.failoverTransitionsRecoverState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_recover_total"),
"Total number of transitions into RECOVER state (FailoverTransitionsRecoverState)",
nil,
nil,
)
c.FailoverBndupdDropped = prometheus.NewDesc(
c.failoverBndupdDropped = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_dropped_total"),
"Total number of DHCP fail over Binding Updates dropped (FailoverBndupdDropped)",
nil,
@@ -254,158 +267,158 @@ type dhcpPerf struct {
FailoverBndupdDropped float64 `perflib:"Failover: BndUpd Dropped."`
}
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dhcpPerfs []dhcpPerf
if err := perflib.UnmarshalObject(ctx.PerfObjects["DHCP Server"], &dhcpPerfs, c.logger); err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.PacketsReceivedTotal,
c.packetsReceivedTotal,
prometheus.CounterValue,
dhcpPerfs[0].PacketsReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DuplicatesDroppedTotal,
c.duplicatesDroppedTotal,
prometheus.CounterValue,
dhcpPerfs[0].DuplicatesDroppedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsExpiredTotal,
c.packetsExpiredTotal,
prometheus.CounterValue,
dhcpPerfs[0].PacketsExpiredTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ActiveQueueLength,
c.activeQueueLength,
prometheus.GaugeValue,
dhcpPerfs[0].ActiveQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.ConflictCheckQueueLength,
c.conflictCheckQueueLength,
prometheus.GaugeValue,
dhcpPerfs[0].ConflictCheckQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.DiscoversTotal,
c.discoversTotal,
prometheus.CounterValue,
dhcpPerfs[0].DiscoversTotal,
)
ch <- prometheus.MustNewConstMetric(
c.OffersTotal,
c.offersTotal,
prometheus.CounterValue,
dhcpPerfs[0].OffersTotal,
)
ch <- prometheus.MustNewConstMetric(
c.RequestsTotal,
c.requestsTotal,
prometheus.CounterValue,
dhcpPerfs[0].RequestsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.InformsTotal,
c.informsTotal,
prometheus.CounterValue,
dhcpPerfs[0].InformsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AcksTotal,
c.acksTotal,
prometheus.CounterValue,
dhcpPerfs[0].AcksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.NacksTotal,
c.nACKsTotal,
prometheus.CounterValue,
dhcpPerfs[0].NacksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DeclinesTotal,
c.declinesTotal,
prometheus.CounterValue,
dhcpPerfs[0].DeclinesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ReleasesTotal,
c.releasesTotal,
prometheus.CounterValue,
dhcpPerfs[0].ReleasesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.OfferQueueLength,
c.offerQueueLength,
prometheus.GaugeValue,
dhcpPerfs[0].OfferQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.DeniedDueToMatch,
c.deniedDueToMatch,
prometheus.CounterValue,
dhcpPerfs[0].DeniedDueToMatch,
)
ch <- prometheus.MustNewConstMetric(
c.DeniedDueToNonMatch,
c.deniedDueToNonMatch,
prometheus.CounterValue,
dhcpPerfs[0].DeniedDueToNonMatch,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdSentTotal,
c.failoverBndupdSentTotal,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndupdSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdReceivedTotal,
c.failoverBndupdReceivedTotal,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndupdReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndackSentTotal,
c.failoverBndackSentTotal,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndackSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndackReceivedTotal,
c.failoverBndackReceivedTotal,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndackReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdPendingOutboundQueue,
c.failoverBndupdPendingOutboundQueue,
prometheus.GaugeValue,
dhcpPerfs[0].FailoverBndupdPendingOutboundQueue,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverTransitionsCommunicationinterruptedState,
c.failoverTransitionsCommunicationInterruptedState,
prometheus.CounterValue,
dhcpPerfs[0].FailoverTransitionsCommunicationinterruptedState,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverTransitionsPartnerdownState,
c.failoverTransitionsPartnerDownState,
prometheus.CounterValue,
dhcpPerfs[0].FailoverTransitionsPartnerdownState,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverTransitionsRecoverState,
c.failoverTransitionsRecoverState,
prometheus.CounterValue,
dhcpPerfs[0].FailoverTransitionsRecoverState,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdDropped,
c.failoverBndupdDropped,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndupdDropped,
)

View File

@@ -23,41 +23,54 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for a few WMI metrics in Win32_DiskDrive
type collector struct {
// A Collector is a Prometheus Collector for a few WMI metrics in Win32_DiskDrive.
type Collector struct {
config Config
logger log.Logger
DiskInfo *prometheus.Desc
Status *prometheus.Desc
Size *prometheus.Desc
Partitions *prometheus.Desc
Availability *prometheus.Desc
availability *prometheus.Desc
diskInfo *prometheus.Desc
partitions *prometheus.Desc
size *prometheus.Desc
status *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.DiskInfo = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.diskInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"General drive information",
[]string{
@@ -68,25 +81,25 @@ func (c *collector) Build() error {
},
nil,
)
c.Status = prometheus.NewDesc(
c.status = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "status"),
"Status of the drive",
[]string{"name", "status"},
nil,
)
c.Size = prometheus.NewDesc(
c.size = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "size"),
"Size of the disk drive. It is calculated by multiplying the total number of cylinders, tracks in each cylinder, sectors in each track, and bytes in each sector.",
[]string{"name"},
nil,
)
c.Partitions = prometheus.NewDesc(
c.partitions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "partitions"),
"Number of partitions",
[]string{"name"},
nil,
)
c.Availability = prometheus.NewDesc(
c.availability = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availability"),
"Availability Status",
[]string{"name", "availability"},
@@ -96,7 +109,7 @@ func (c *collector) Build() error {
return nil
}
type Win32_DiskDrive struct {
type win32_DiskDrive struct {
DeviceID string
Model string
Size uint64
@@ -149,7 +162,7 @@ var (
)
// Collect sends the metric values for each metric to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting disk_drive_info metrics", "err", err)
return err
@@ -157,8 +170,8 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
return nil
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_DiskDrive
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []win32_DiskDrive
if err := wmi.Query(win32DiskQuery, &dst); err != nil {
return err
@@ -169,13 +182,13 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
for _, disk := range dst {
ch <- prometheus.MustNewConstMetric(
c.DiskInfo,
c.diskInfo,
prometheus.GaugeValue,
1.0,
strings.Trim(disk.DeviceID, "\\.\\"),
strings.Trim(disk.DeviceID, "\\.\\"), //nolint:staticcheck
strings.TrimRight(disk.Model, " "),
strings.TrimRight(disk.Caption, " "),
strings.TrimRight(disk.Name, "\\.\\"),
strings.TrimRight(disk.Name, "\\.\\"), //nolint:staticcheck
)
for _, status := range allDiskStatus {
@@ -185,26 +198,26 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.Status,
c.status,
prometheus.GaugeValue,
isCurrentState,
strings.Trim(disk.Name, "\\.\\"),
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
status,
)
}
ch <- prometheus.MustNewConstMetric(
c.Size,
c.size,
prometheus.GaugeValue,
float64(disk.Size),
strings.Trim(disk.Name, "\\.\\"),
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
)
ch <- prometheus.MustNewConstMetric(
c.Partitions,
c.partitions,
prometheus.GaugeValue,
float64(disk.Partitions),
strings.Trim(disk.Name, "\\.\\"),
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
)
for availNum, val := range availMap {
@@ -213,10 +226,10 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.Availability,
c.availability,
prometheus.GaugeValue,
isCurrentState,
strings.Trim(disk.Name, "\\.\\"),
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
val,
)
}

View File

@@ -19,184 +19,197 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_DNS_DNS metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.
type Collector struct {
config Config
logger log.Logger
ZoneTransferRequestsReceived *prometheus.Desc
ZoneTransferRequestsSent *prometheus.Desc
ZoneTransferResponsesReceived *prometheus.Desc
ZoneTransferSuccessReceived *prometheus.Desc
ZoneTransferSuccessSent *prometheus.Desc
ZoneTransferFailures *prometheus.Desc
MemoryUsedBytes *prometheus.Desc
DynamicUpdatesQueued *prometheus.Desc
DynamicUpdatesReceived *prometheus.Desc
DynamicUpdatesFailures *prometheus.Desc
NotifyReceived *prometheus.Desc
NotifySent *prometheus.Desc
SecureUpdateFailures *prometheus.Desc
SecureUpdateReceived *prometheus.Desc
Queries *prometheus.Desc
Responses *prometheus.Desc
RecursiveQueries *prometheus.Desc
RecursiveQueryFailures *prometheus.Desc
RecursiveQuerySendTimeouts *prometheus.Desc
WinsQueries *prometheus.Desc
WinsResponses *prometheus.Desc
UnmatchedResponsesReceived *prometheus.Desc
dynamicUpdatesFailures *prometheus.Desc
dynamicUpdatesQueued *prometheus.Desc
dynamicUpdatesReceived *prometheus.Desc
memoryUsedBytes *prometheus.Desc
notifyReceived *prometheus.Desc
notifySent *prometheus.Desc
queries *prometheus.Desc
recursiveQueries *prometheus.Desc
recursiveQueryFailures *prometheus.Desc
recursiveQuerySendTimeouts *prometheus.Desc
responses *prometheus.Desc
secureUpdateFailures *prometheus.Desc
secureUpdateReceived *prometheus.Desc
unmatchedResponsesReceived *prometheus.Desc
winsQueries *prometheus.Desc
winsResponses *prometheus.Desc
zoneTransferFailures *prometheus.Desc
zoneTransferRequestsReceived *prometheus.Desc
zoneTransferRequestsSent *prometheus.Desc
zoneTransferResponsesReceived *prometheus.Desc
zoneTransferSuccessReceived *prometheus.Desc
zoneTransferSuccessSent *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.ZoneTransferRequestsReceived = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.zoneTransferRequestsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
"Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
[]string{"qtype"},
nil,
)
c.ZoneTransferRequestsSent = prometheus.NewDesc(
c.zoneTransferRequestsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_sent_total"),
"Number of zone transfer requests (AXFR/IXFR) sent by the secondary DNS server",
[]string{"qtype"},
nil,
)
c.ZoneTransferResponsesReceived = prometheus.NewDesc(
c.zoneTransferResponsesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_response_received_total"),
"Number of zone transfer responses (AXFR/IXFR) received by the secondary DNS server",
[]string{"qtype"},
nil,
)
c.ZoneTransferSuccessReceived = prometheus.NewDesc(
c.zoneTransferSuccessReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_success_received_total"),
"Number of successful zone transfers (AXFR/IXFR) received by the secondary DNS server",
[]string{"qtype", "protocol"},
nil,
)
c.ZoneTransferSuccessSent = prometheus.NewDesc(
c.zoneTransferSuccessSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_success_sent_total"),
"Number of successful zone transfers (AXFR/IXFR) of the master DNS server",
[]string{"qtype"},
nil,
)
c.ZoneTransferFailures = prometheus.NewDesc(
c.zoneTransferFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_failures_total"),
"Number of failed zone transfers of the master DNS server",
nil,
nil,
)
c.MemoryUsedBytes = prometheus.NewDesc(
c.memoryUsedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memory_used_bytes"),
"Current memory used by DNS server",
[]string{"area"},
nil,
)
c.DynamicUpdatesQueued = prometheus.NewDesc(
c.dynamicUpdatesQueued = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dynamic_updates_queued"),
"Number of dynamic updates queued by the DNS server",
nil,
nil,
)
c.DynamicUpdatesReceived = prometheus.NewDesc(
c.dynamicUpdatesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dynamic_updates_received_total"),
"Number of secure update requests received by the DNS server",
[]string{"operation"},
nil,
)
c.DynamicUpdatesFailures = prometheus.NewDesc(
c.dynamicUpdatesFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dynamic_updates_failures_total"),
"Number of dynamic updates which timed out or were rejected by the DNS server",
[]string{"reason"},
nil,
)
c.NotifyReceived = prometheus.NewDesc(
c.notifyReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "notify_received_total"),
"Number of notifies received by the secondary DNS server",
nil,
nil,
)
c.NotifySent = prometheus.NewDesc(
c.notifySent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "notify_sent_total"),
"Number of notifies sent by the master DNS server",
nil,
nil,
)
c.SecureUpdateFailures = prometheus.NewDesc(
c.secureUpdateFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "secure_update_failures_total"),
"Number of secure updates that failed on the DNS server",
nil,
nil,
)
c.SecureUpdateReceived = prometheus.NewDesc(
c.secureUpdateReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "secure_update_received_total"),
"Number of secure update requests received by the DNS server",
nil,
nil,
)
c.Queries = prometheus.NewDesc(
c.queries = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "queries_total"),
"Number of queries received by DNS server",
[]string{"protocol"},
nil,
)
c.Responses = prometheus.NewDesc(
c.responses = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "responses_total"),
"Number of responses sent by DNS server",
[]string{"protocol"},
nil,
)
c.RecursiveQueries = prometheus.NewDesc(
c.recursiveQueries = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "recursive_queries_total"),
"Number of recursive queries received by DNS server",
nil,
nil,
)
c.RecursiveQueryFailures = prometheus.NewDesc(
c.recursiveQueryFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "recursive_query_failures_total"),
"Number of recursive query failures",
nil,
nil,
)
c.RecursiveQuerySendTimeouts = prometheus.NewDesc(
c.recursiveQuerySendTimeouts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "recursive_query_send_timeouts_total"),
"Number of recursive query sending timeouts",
nil,
nil,
)
c.WinsQueries = prometheus.NewDesc(
c.winsQueries = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "wins_queries_total"),
"Number of WINS lookup requests received by the server",
[]string{"direction"},
nil,
)
c.WinsResponses = prometheus.NewDesc(
c.winsResponses = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "wins_responses_total"),
"Number of WINS lookup responses sent by the server",
[]string{"direction"},
nil,
)
c.UnmatchedResponsesReceived = prometheus.NewDesc(
c.unmatchedResponsesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "unmatched_responses_total"),
"Number of response packets received by the DNS server that do not match any outstanding remote query",
nil,
@@ -207,7 +220,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting dns metrics", "err", err)
return err
@@ -261,7 +274,7 @@ type Win32_PerfRawData_DNS_DNS struct {
ZoneTransferSOARequestSent uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_DNS_DNS
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -272,66 +285,66 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferRequestsReceived,
c.zoneTransferRequestsReceived,
prometheus.CounterValue,
float64(dst[0].AXFRRequestReceived),
"full",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferRequestsReceived,
c.zoneTransferRequestsReceived,
prometheus.CounterValue,
float64(dst[0].IXFRRequestReceived),
"incremental",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferRequestsSent,
c.zoneTransferRequestsSent,
prometheus.CounterValue,
float64(dst[0].AXFRRequestSent),
"full",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferRequestsSent,
c.zoneTransferRequestsSent,
prometheus.CounterValue,
float64(dst[0].IXFRRequestSent),
"incremental",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferRequestsSent,
c.zoneTransferRequestsSent,
prometheus.CounterValue,
float64(dst[0].ZoneTransferSOARequestSent),
"soa",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferResponsesReceived,
c.zoneTransferResponsesReceived,
prometheus.CounterValue,
float64(dst[0].AXFRResponseReceived),
"full",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferResponsesReceived,
c.zoneTransferResponsesReceived,
prometheus.CounterValue,
float64(dst[0].IXFRResponseReceived),
"incremental",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferSuccessReceived,
c.zoneTransferSuccessReceived,
prometheus.CounterValue,
float64(dst[0].AXFRSuccessReceived),
"full",
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferSuccessReceived,
c.zoneTransferSuccessReceived,
prometheus.CounterValue,
float64(dst[0].IXFRTCPSuccessReceived),
"incremental",
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferSuccessReceived,
c.zoneTransferSuccessReceived,
prometheus.CounterValue,
float64(dst[0].IXFRTCPSuccessReceived),
"incremental",
@@ -339,183 +352,183 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferSuccessSent,
c.zoneTransferSuccessSent,
prometheus.CounterValue,
float64(dst[0].AXFRSuccessSent),
"full",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferSuccessSent,
c.zoneTransferSuccessSent,
prometheus.CounterValue,
float64(dst[0].IXFRSuccessSent),
"incremental",
)
ch <- prometheus.MustNewConstMetric(
c.ZoneTransferFailures,
c.zoneTransferFailures,
prometheus.CounterValue,
float64(dst[0].ZoneTransferFailure),
)
ch <- prometheus.MustNewConstMetric(
c.MemoryUsedBytes,
c.memoryUsedBytes,
prometheus.GaugeValue,
float64(dst[0].CachingMemory),
"caching",
)
ch <- prometheus.MustNewConstMetric(
c.MemoryUsedBytes,
c.memoryUsedBytes,
prometheus.GaugeValue,
float64(dst[0].DatabaseNodeMemory),
"database_node",
)
ch <- prometheus.MustNewConstMetric(
c.MemoryUsedBytes,
c.memoryUsedBytes,
prometheus.GaugeValue,
float64(dst[0].NbstatMemory),
"nbstat",
)
ch <- prometheus.MustNewConstMetric(
c.MemoryUsedBytes,
c.memoryUsedBytes,
prometheus.GaugeValue,
float64(dst[0].RecordFlowMemory),
"record_flow",
)
ch <- prometheus.MustNewConstMetric(
c.MemoryUsedBytes,
c.memoryUsedBytes,
prometheus.GaugeValue,
float64(dst[0].TCPMessageMemory),
"tcp_message",
)
ch <- prometheus.MustNewConstMetric(
c.MemoryUsedBytes,
c.memoryUsedBytes,
prometheus.GaugeValue,
float64(dst[0].UDPMessageMemory),
"udp_message",
)
ch <- prometheus.MustNewConstMetric(
c.DynamicUpdatesReceived,
c.dynamicUpdatesReceived,
prometheus.CounterValue,
float64(dst[0].DynamicUpdateNoOperation),
"noop",
)
ch <- prometheus.MustNewConstMetric(
c.DynamicUpdatesReceived,
c.dynamicUpdatesReceived,
prometheus.CounterValue,
float64(dst[0].DynamicUpdateWrittentoDatabase),
"written",
)
ch <- prometheus.MustNewConstMetric(
c.DynamicUpdatesQueued,
c.dynamicUpdatesQueued,
prometheus.GaugeValue,
float64(dst[0].DynamicUpdateQueued),
)
ch <- prometheus.MustNewConstMetric(
c.DynamicUpdatesFailures,
c.dynamicUpdatesFailures,
prometheus.CounterValue,
float64(dst[0].DynamicUpdateRejected),
"rejected",
)
ch <- prometheus.MustNewConstMetric(
c.DynamicUpdatesFailures,
c.dynamicUpdatesFailures,
prometheus.CounterValue,
float64(dst[0].DynamicUpdateTimeOuts),
"timeout",
)
ch <- prometheus.MustNewConstMetric(
c.NotifyReceived,
c.notifyReceived,
prometheus.CounterValue,
float64(dst[0].NotifyReceived),
)
ch <- prometheus.MustNewConstMetric(
c.NotifySent,
c.notifySent,
prometheus.CounterValue,
float64(dst[0].NotifySent),
)
ch <- prometheus.MustNewConstMetric(
c.RecursiveQueries,
c.recursiveQueries,
prometheus.CounterValue,
float64(dst[0].RecursiveQueries),
)
ch <- prometheus.MustNewConstMetric(
c.RecursiveQueryFailures,
c.recursiveQueryFailures,
prometheus.CounterValue,
float64(dst[0].RecursiveQueryFailure),
)
ch <- prometheus.MustNewConstMetric(
c.RecursiveQuerySendTimeouts,
c.recursiveQuerySendTimeouts,
prometheus.CounterValue,
float64(dst[0].RecursiveSendTimeOuts),
)
ch <- prometheus.MustNewConstMetric(
c.Queries,
c.queries,
prometheus.CounterValue,
float64(dst[0].TCPQueryReceived),
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.Queries,
c.queries,
prometheus.CounterValue,
float64(dst[0].UDPQueryReceived),
"udp",
)
ch <- prometheus.MustNewConstMetric(
c.Responses,
c.responses,
prometheus.CounterValue,
float64(dst[0].TCPResponseSent),
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.Responses,
c.responses,
prometheus.CounterValue,
float64(dst[0].UDPResponseSent),
"udp",
)
ch <- prometheus.MustNewConstMetric(
c.UnmatchedResponsesReceived,
c.unmatchedResponsesReceived,
prometheus.CounterValue,
float64(dst[0].UnmatchedResponsesReceived),
)
ch <- prometheus.MustNewConstMetric(
c.WinsQueries,
c.winsQueries,
prometheus.CounterValue,
float64(dst[0].WINSLookupReceived),
"forward",
)
ch <- prometheus.MustNewConstMetric(
c.WinsQueries,
c.winsQueries,
prometheus.CounterValue,
float64(dst[0].WINSReverseLookupReceived),
"reverse",
)
ch <- prometheus.MustNewConstMetric(
c.WinsResponses,
c.winsResponses,
prometheus.CounterValue,
float64(dst[0].WINSResponseSent),
"forward",
)
ch <- prometheus.MustNewConstMetric(
c.WinsResponses,
c.winsResponses,
prometheus.CounterValue,
float64(dst[0].WINSReverseResponseSent),
"reverse",
)
ch <- prometheus.MustNewConstMetric(
c.SecureUpdateFailures,
c.secureUpdateFailures,
prometheus.CounterValue,
float64(dst[0].SecureUpdateFailure),
)
ch <- prometheus.MustNewConstMetric(
c.SecureUpdateReceived,
c.secureUpdateReceived,
prometheus.CounterValue,
float64(dst[0].SecureUpdateReceived),
)

View File

@@ -13,123 +13,161 @@ import (
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "exchange"
FlagExchangeListAllCollectors = "collectors.exchange.list"
FlagExchangeCollectorsEnabled = "collectors.exchange.enabled"
)
const Name = "exchange"
type Config struct {
CollectorsEnabled string `yaml:"collectors_enabled"`
CollectorsEnabled []string `yaml:"collectors_enabled"`
}
var ConfigDefaults = Config{
CollectorsEnabled: "",
CollectorsEnabled: []string{
"ADAccessProcesses",
"TransportQueues",
"HttpProxy",
"ActiveSync",
"AvailabilityService",
"OutlookWebAccess",
"Autodiscover",
"WorkloadManagement",
"RpcClientAccess",
"MapiHttpEmsmdb",
},
}
type collector struct {
type Collector struct {
config Config
logger log.Logger
exchangeListAllCollectors *bool
exchangeCollectorsEnabled *string
LDAPReadTime *prometheus.Desc
LDAPSearchTime *prometheus.Desc
LDAPWriteTime *prometheus.Desc
LDAPTimeoutErrorsPerSec *prometheus.Desc
LongRunningLDAPOperationsPerMin *prometheus.Desc
ExternalActiveRemoteDeliveryQueueLength *prometheus.Desc
InternalActiveRemoteDeliveryQueueLength *prometheus.Desc
ActiveMailboxDeliveryQueueLength *prometheus.Desc
RetryMailboxDeliveryQueueLength *prometheus.Desc
UnreachableQueueLength *prometheus.Desc
ExternalLargestDeliveryQueueLength *prometheus.Desc
InternalLargestDeliveryQueueLength *prometheus.Desc
PoisonQueueLength *prometheus.Desc
MailboxServerLocatorAverageLatency *prometheus.Desc
AverageAuthenticationLatency *prometheus.Desc
AverageCASProcessingLatency *prometheus.Desc
MailboxServerProxyFailureRate *prometheus.Desc
OutstandingProxyRequests *prometheus.Desc
ProxyRequestsPerSec *prometheus.Desc
ActiveSyncRequestsPerSec *prometheus.Desc
PingCommandsPending *prometheus.Desc
SyncCommandsPerSec *prometheus.Desc
AvailabilityRequestsSec *prometheus.Desc
CurrentUniqueUsers *prometheus.Desc
OWARequestsPerSec *prometheus.Desc
AutodiscoverRequestsPerSec *prometheus.Desc
ActiveTasks *prometheus.Desc
CompletedTasks *prometheus.Desc
QueuedTasks *prometheus.Desc
YieldedTasks *prometheus.Desc
IsActive *prometheus.Desc
RPCAveragedLatency *prometheus.Desc
RPCRequests *prometheus.Desc
ActiveUserCount *prometheus.Desc
ConnectionCount *prometheus.Desc
RPCOperationsPerSec *prometheus.Desc
UserCount *prometheus.Desc
ActiveUserCountMapiHttpEmsmdb *prometheus.Desc
activeMailboxDeliveryQueueLength *prometheus.Desc
activeSyncRequestsPerSec *prometheus.Desc
activeTasks *prometheus.Desc
activeUserCount *prometheus.Desc
activeUserCountMapiHttpEmsMDB *prometheus.Desc
autoDiscoverRequestsPerSec *prometheus.Desc
availabilityRequestsSec *prometheus.Desc
averageAuthenticationLatency *prometheus.Desc
averageCASProcessingLatency *prometheus.Desc
completedTasks *prometheus.Desc
connectionCount *prometheus.Desc
currentUniqueUsers *prometheus.Desc
externalActiveRemoteDeliveryQueueLength *prometheus.Desc
externalLargestDeliveryQueueLength *prometheus.Desc
internalActiveRemoteDeliveryQueueLength *prometheus.Desc
internalLargestDeliveryQueueLength *prometheus.Desc
isActive *prometheus.Desc
ldapReadTime *prometheus.Desc
ldapSearchTime *prometheus.Desc
ldapTimeoutErrorsPerSec *prometheus.Desc
ldapWriteTime *prometheus.Desc
longRunningLDAPOperationsPerMin *prometheus.Desc
mailboxServerLocatorAverageLatency *prometheus.Desc
mailboxServerProxyFailureRate *prometheus.Desc
outstandingProxyRequests *prometheus.Desc
owaRequestsPerSec *prometheus.Desc
pingCommandsPending *prometheus.Desc
poisonQueueLength *prometheus.Desc
proxyRequestsPerSec *prometheus.Desc
queuedTasks *prometheus.Desc
retryMailboxDeliveryQueueLength *prometheus.Desc
rpcAveragedLatency *prometheus.Desc
rpcOperationsPerSec *prometheus.Desc
rpcRequests *prometheus.Desc
syncCommandsPerSec *prometheus.Desc
unreachableQueueLength *prometheus.Desc
userCount *prometheus.Desc
yieldedTasks *prometheus.Desc
enabledCollectors []string
}
// All available collector functions
var exchangeAllCollectorNames = []string{
"ADAccessProcesses",
"TransportQueues",
"HttpProxy",
"ActiveSync",
"AvailabilityService",
"OutlookWebAccess",
"Autodiscover",
"WorkloadManagement",
"RpcClientAccess",
"MapiHttpEmsmdb",
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
exchangeListAllCollectors := false
c := &collector{
exchangeCollectorsEnabled: &config.CollectorsEnabled,
exchangeListAllCollectors: &exchangeListAllCollectors,
if config.CollectorsEnabled == nil {
config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
return &collector{
exchangeListAllCollectors: app.Flag(
FlagExchangeListAllCollectors,
"List the collectors along with their perflib object name/ids",
).Bool(),
exchangeCollectorsEnabled: app.Flag(
FlagExchangeCollectorsEnabled,
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
).Default(ConfigDefaults.CollectorsEnabled).String(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
c.config.CollectorsEnabled = make([]string, 0)
var listAllCollectors bool
var collectorsEnabled string
app.Flag(
"collectors.exchange.list",
"List the collectors along with their perflib object name/ids",
).BoolVar(&listAllCollectors)
app.Flag(
"collectors.exchange.enabled",
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
app.PreAction(func(*kingpin.ParseContext) error {
if listAllCollectors {
collectorDesc := map[string]string{
"ADAccessProcesses": "[19108] MSExchange ADAccess Processes",
"TransportQueues": "[20524] MSExchangeTransport Queues",
"HttpProxy": "[36934] MSExchange HttpProxy",
"ActiveSync": "[25138] MSExchange ActiveSync",
"AvailabilityService": "[24914] MSExchange Availability Service",
"OutlookWebAccess": "[24618] MSExchange OWA",
"Autodiscover": "[29240] MSExchange Autodiscover",
"WorkloadManagement": "[19430] MSExchange WorkloadManagement Workloads",
"RpcClientAccess": "[29336] MSExchange RpcClientAccess",
"MapiHttpEmsmdb": "[26463] MSExchange MapiHttp Emsmdb",
}
sb := strings.Builder{}
sb.WriteString(fmt.Sprintf("%-32s %-32s\n", "Collector Name", "[PerfID] Perflib Object"))
for _, cname := range ConfigDefaults.CollectorsEnabled {
sb.WriteString(fmt.Sprintf("%-32s %-32s\n", cname, collectorDesc[cname]))
}
app.UsageTemplate(sb.String()).Usage(nil)
os.Exit(0)
}
return nil
})
app.Action(func(*kingpin.ParseContext) error {
c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")
return nil
})
return c
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{
"MSExchange ADAccess Processes",
"MSExchangeTransport Queues",
@@ -144,7 +182,11 @@ func (c *collector) GetPerfCounter() ([]string, error) {
}, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
// desc creates a new prometheus description
desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
return prometheus.NewDesc(
@@ -155,87 +197,62 @@ func (c *collector) Build() error {
)
}
c.RPCAveragedLatency = desc("rpc_avg_latency_sec", "The latency (sec) averaged for the past 1024 packets")
c.RPCRequests = desc("rpc_requests", "Number of client requests currently being processed by the RPC Client Access service")
c.ActiveUserCount = desc("rpc_active_user_count", "Number of unique users that have shown some kind of activity in the last 2 minutes")
c.ConnectionCount = desc("rpc_connection_count", "Total number of client connections maintained")
c.RPCOperationsPerSec = desc("rpc_operations_total", "The rate at which RPC operations occur")
c.UserCount = desc("rpc_user_count", "Number of users")
c.LDAPReadTime = desc("ldap_read_time_sec", "Time (sec) to send an LDAP read request and receive a response", "name")
c.LDAPSearchTime = desc("ldap_search_time_sec", "Time (sec) to send an LDAP search request and receive a response", "name")
c.LDAPWriteTime = desc("ldap_write_time_sec", "Time (sec) to send an LDAP Add/Modify/Delete request and receive a response", "name")
c.LDAPTimeoutErrorsPerSec = desc("ldap_timeout_errors_total", "Total number of LDAP timeout errors", "name")
c.LongRunningLDAPOperationsPerMin = desc("ldap_long_running_ops_per_sec", "Long Running LDAP operations per second", "name")
c.ExternalActiveRemoteDeliveryQueueLength = desc("transport_queues_external_active_remote_delivery", "External Active Remote Delivery Queue length", "name")
c.InternalActiveRemoteDeliveryQueueLength = desc("transport_queues_internal_active_remote_delivery", "Internal Active Remote Delivery Queue length", "name")
c.ActiveMailboxDeliveryQueueLength = desc("transport_queues_active_mailbox_delivery", "Active Mailbox Delivery Queue length", "name")
c.RetryMailboxDeliveryQueueLength = desc("transport_queues_retry_mailbox_delivery", "Retry Mailbox Delivery Queue length", "name")
c.UnreachableQueueLength = desc("transport_queues_unreachable", "Unreachable Queue length", "name")
c.ExternalLargestDeliveryQueueLength = desc("transport_queues_external_largest_delivery", "External Largest Delivery Queue length", "name")
c.InternalLargestDeliveryQueueLength = desc("transport_queues_internal_largest_delivery", "Internal Largest Delivery Queue length", "name")
c.PoisonQueueLength = desc("transport_queues_poison", "Poison Queue length", "name")
c.MailboxServerLocatorAverageLatency = desc("http_proxy_mailbox_server_locator_avg_latency_sec", "Average latency (sec) of MailboxServerLocator web service calls", "name")
c.AverageAuthenticationLatency = desc("http_proxy_avg_auth_latency", "Average time spent authenticating CAS requests over the last 200 samples", "name")
c.OutstandingProxyRequests = desc("http_proxy_outstanding_proxy_requests", "Number of concurrent outstanding proxy requests", "name")
c.ProxyRequestsPerSec = desc("http_proxy_requests_total", "Number of proxy requests processed each second", "name")
c.AvailabilityRequestsSec = desc("avail_service_requests_per_sec", "Number of requests serviced per second")
c.CurrentUniqueUsers = desc("owa_current_unique_users", "Number of unique users currently logged on to Outlook Web App")
c.OWARequestsPerSec = desc("owa_requests_total", "Number of requests handled by Outlook Web App per second")
c.AutodiscoverRequestsPerSec = desc("autodiscover_requests_total", "Number of autodiscover service requests processed each second")
c.ActiveTasks = desc("workload_active_tasks", "Number of active tasks currently running in the background for workload management", "name")
c.CompletedTasks = desc("workload_completed_tasks", "Number of workload management tasks that have been completed", "name")
c.QueuedTasks = desc("workload_queued_tasks", "Number of workload management tasks that are currently queued up waiting to be processed", "name")
c.YieldedTasks = desc("workload_yielded_tasks", "The total number of tasks that have been yielded by a workload", "name")
c.IsActive = desc("workload_is_active", "Active indicates whether the workload is in an active (1) or paused (0) state", "name")
c.ActiveSyncRequestsPerSec = desc("activesync_requests_total", "Num HTTP requests received from the client via ASP.NET per sec. Shows Current user load")
c.AverageCASProcessingLatency = desc("http_proxy_avg_cas_proccessing_latency_sec", "Average latency (sec) of CAS processing time over the last 200 reqs", "name")
c.MailboxServerProxyFailureRate = desc("http_proxy_mailbox_proxy_failure_rate", "% of failures between this CAS and MBX servers over the last 200 samples", "name")
c.PingCommandsPending = desc("activesync_ping_cmds_pending", "Number of ping commands currently pending in the queue")
c.SyncCommandsPerSec = desc("activesync_sync_cmds_total", "Number of sync commands processed per second. Clients use this command to synchronize items within a folder")
c.ActiveUserCountMapiHttpEmsmdb = desc("mapihttp_emsmdb_active_user_count", "Number of unique outlook users that have shown some kind of activity in the last 2 minutes")
c.rpcAveragedLatency = desc("rpc_avg_latency_sec", "The latency (sec) averaged for the past 1024 packets")
c.rpcRequests = desc("rpc_requests", "Number of client requests currently being processed by the RPC Client Access service")
c.activeUserCount = desc("rpc_active_user_count", "Number of unique users that have shown some kind of activity in the last 2 minutes")
c.connectionCount = desc("rpc_connection_count", "Total number of client connections maintained")
c.rpcOperationsPerSec = desc("rpc_operations_total", "The rate at which RPC operations occur")
c.userCount = desc("rpc_user_count", "Number of users")
c.ldapReadTime = desc("ldap_read_time_sec", "Time (sec) to send an LDAP read request and receive a response", "name")
c.ldapSearchTime = desc("ldap_search_time_sec", "Time (sec) to send an LDAP search request and receive a response", "name")
c.ldapWriteTime = desc("ldap_write_time_sec", "Time (sec) to send an LDAP Add/Modify/Delete request and receive a response", "name")
c.ldapTimeoutErrorsPerSec = desc("ldap_timeout_errors_total", "Total number of LDAP timeout errors", "name")
c.longRunningLDAPOperationsPerMin = desc("ldap_long_running_ops_per_sec", "Long Running LDAP operations per second", "name")
c.externalActiveRemoteDeliveryQueueLength = desc("transport_queues_external_active_remote_delivery", "External Active Remote Delivery Queue length", "name")
c.internalActiveRemoteDeliveryQueueLength = desc("transport_queues_internal_active_remote_delivery", "Internal Active Remote Delivery Queue length", "name")
c.activeMailboxDeliveryQueueLength = desc("transport_queues_active_mailbox_delivery", "Active Mailbox Delivery Queue length", "name")
c.retryMailboxDeliveryQueueLength = desc("transport_queues_retry_mailbox_delivery", "Retry Mailbox Delivery Queue length", "name")
c.unreachableQueueLength = desc("transport_queues_unreachable", "Unreachable Queue length", "name")
c.externalLargestDeliveryQueueLength = desc("transport_queues_external_largest_delivery", "External Largest Delivery Queue length", "name")
c.internalLargestDeliveryQueueLength = desc("transport_queues_internal_largest_delivery", "Internal Largest Delivery Queue length", "name")
c.poisonQueueLength = desc("transport_queues_poison", "Poison Queue length", "name")
c.mailboxServerLocatorAverageLatency = desc("http_proxy_mailbox_server_locator_avg_latency_sec", "Average latency (sec) of MailboxServerLocator web service calls", "name")
c.averageAuthenticationLatency = desc("http_proxy_avg_auth_latency", "Average time spent authenticating CAS requests over the last 200 samples", "name")
c.outstandingProxyRequests = desc("http_proxy_outstanding_proxy_requests", "Number of concurrent outstanding proxy requests", "name")
c.proxyRequestsPerSec = desc("http_proxy_requests_total", "Number of proxy requests processed each second", "name")
c.availabilityRequestsSec = desc("avail_service_requests_per_sec", "Number of requests serviced per second")
c.currentUniqueUsers = desc("owa_current_unique_users", "Number of unique users currently logged on to Outlook Web App")
c.owaRequestsPerSec = desc("owa_requests_total", "Number of requests handled by Outlook Web App per second")
c.autoDiscoverRequestsPerSec = desc("autodiscover_requests_total", "Number of autodiscover service requests processed each second")
c.activeTasks = desc("workload_active_tasks", "Number of active tasks currently running in the background for workload management", "name")
c.completedTasks = desc("workload_completed_tasks", "Number of workload management tasks that have been completed", "name")
c.queuedTasks = desc("workload_queued_tasks", "Number of workload management tasks that are currently queued up waiting to be processed", "name")
c.yieldedTasks = desc("workload_yielded_tasks", "The total number of tasks that have been yielded by a workload", "name")
c.isActive = desc("workload_is_active", "Active indicates whether the workload is in an active (1) or paused (0) state", "name")
c.activeSyncRequestsPerSec = desc("activesync_requests_total", "Num HTTP requests received from the client via ASP.NET per sec. Shows Current user load")
c.averageCASProcessingLatency = desc("http_proxy_avg_cas_processing_latency_sec", "Average latency (sec) of CAS processing time over the last 200 reqs", "name")
c.mailboxServerProxyFailureRate = desc("http_proxy_mailbox_proxy_failure_rate", "% of failures between this CAS and MBX servers over the last 200 samples", "name")
c.pingCommandsPending = desc("activesync_ping_cmds_pending", "Number of ping commands currently pending in the queue")
c.syncCommandsPerSec = desc("activesync_sync_cmds_total", "Number of sync commands processed per second. Clients use this command to synchronize items within a folder")
c.activeUserCountMapiHttpEmsMDB = desc("mapihttp_emsmdb_active_user_count", "Number of unique outlook users that have shown some kind of activity in the last 2 minutes")
c.enabledCollectors = make([]string, 0, len(exchangeAllCollectorNames))
c.enabledCollectors = make([]string, 0, len(ConfigDefaults.CollectorsEnabled))
collectorDesc := map[string]string{
"ADAccessProcesses": "[19108] MSExchange ADAccess Processes",
"TransportQueues": "[20524] MSExchangeTransport Queues",
"HttpProxy": "[36934] MSExchange HttpProxy",
"ActiveSync": "[25138] MSExchange ActiveSync",
"AvailabilityService": "[24914] MSExchange Availability Service",
"OutlookWebAccess": "[24618] MSExchange OWA",
"Autodiscover": "[29240] MSExchange Autodiscover",
"WorkloadManagement": "[19430] MSExchange WorkloadManagement Workloads",
"RpcClientAccess": "[29336] MSExchange RpcClientAccess",
"MapiHttpEmsmdb": "[26463] MSExchange MapiHttp Emsmdb",
for _, collectorName := range c.config.CollectorsEnabled {
if !slices.Contains(ConfigDefaults.CollectorsEnabled, collectorName) {
return fmt.Errorf("unknown exchange collector: %s", collectorName)
}
c.enabledCollectors = append(c.enabledCollectors, collectorName)
}
if *c.exchangeListAllCollectors {
fmt.Printf("%-32s %-32s\n", "Collector Name", "[PerfID] Perflib Object")
for _, cname := range exchangeAllCollectorNames {
fmt.Printf("%-32s %-32s\n", cname, collectorDesc[cname])
}
os.Exit(0)
}
if utils.IsEmpty(c.exchangeCollectorsEnabled) {
for _, collectorName := range exchangeAllCollectorNames {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
}
} else {
for _, collectorName := range strings.Split(*c.exchangeCollectorsEnabled, ",") {
if slices.Contains(exchangeAllCollectorNames, collectorName) {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
} else {
return fmt.Errorf("unknown exchange collector: %s", collectorName)
}
}
}
c.enabledCollectors = slices.Clip(c.enabledCollectors)
return nil
}
// Collect collects exchange metrics and sends them to prometheus
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
// Collect collects exchange metrics and sends them to prometheus.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
collectorFuncs := map[string]func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error{
"ADAccessProcesses": c.collectADAccessProcesses,
"TransportQueues": c.collectTransportQueues,
@@ -258,7 +275,7 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
return nil
}
// Perflib: [19108] MSExchange ADAccess Processes
// Perflib: [19108] MSExchange ADAccess Processes.
type perflibADAccessProcesses struct {
Name string
@@ -269,7 +286,7 @@ type perflibADAccessProcesses struct {
LongRunningLDAPOperationsPerMin float64 `perflib:"Long Running LDAP Operations/min"`
}
func (c *collector) collectADAccessProcesses(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectADAccessProcesses(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibADAccessProcesses
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange ADAccess Processes"], &data, c.logger); err != nil {
return err
@@ -289,31 +306,31 @@ func (c *collector) collectADAccessProcesses(ctx *types.ScrapeContext, ch chan<-
labelName = fmt.Sprintf("%s_%d", labelName, labelUseCount[labelName])
}
ch <- prometheus.MustNewConstMetric(
c.LDAPReadTime,
c.ldapReadTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPReadTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LDAPSearchTime,
c.ldapSearchTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPSearchTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LDAPWriteTime,
c.ldapWriteTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPWriteTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LDAPTimeoutErrorsPerSec,
c.ldapTimeoutErrorsPerSec,
prometheus.CounterValue,
proc.LDAPTimeoutErrorsPerSec,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LongRunningLDAPOperationsPerMin,
c.longRunningLDAPOperationsPerMin,
prometheus.CounterValue,
proc.LongRunningLDAPOperationsPerMin*60,
labelName,
@@ -322,12 +339,12 @@ func (c *collector) collectADAccessProcesses(ctx *types.ScrapeContext, ch chan<-
return nil
}
// Perflib: [24914] MSExchange Availability Service
// Perflib: [24914] MSExchange Availability Service.
type perflibAvailabilityService struct {
RequestsSec float64 `perflib:"Availability Requests (sec)"`
}
func (c *collector) collectAvailabilityService(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectAvailabilityService(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibAvailabilityService
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange Availability Service"], &data, c.logger); err != nil {
return err
@@ -335,7 +352,7 @@ func (c *collector) collectAvailabilityService(ctx *types.ScrapeContext, ch chan
for _, availservice := range data {
ch <- prometheus.MustNewConstMetric(
c.AvailabilityRequestsSec,
c.availabilityRequestsSec,
prometheus.CounterValue,
availservice.RequestsSec,
)
@@ -343,7 +360,7 @@ func (c *collector) collectAvailabilityService(ctx *types.ScrapeContext, ch chan
return nil
}
// Perflib: [36934] MSExchange HttpProxy
// Perflib: [36934] MSExchange HttpProxy.
type perflibHTTPProxy struct {
Name string
@@ -355,7 +372,7 @@ type perflibHTTPProxy struct {
ProxyRequestsPerSec float64 `perflib:"Proxy Requests/Sec"`
}
func (c *collector) collectHTTPProxy(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectHTTPProxy(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibHTTPProxy
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange HttpProxy"], &data, c.logger); err != nil {
return err
@@ -364,37 +381,37 @@ func (c *collector) collectHTTPProxy(ctx *types.ScrapeContext, ch chan<- prometh
for _, instance := range data {
labelName := c.toLabelName(instance.Name)
ch <- prometheus.MustNewConstMetric(
c.MailboxServerLocatorAverageLatency,
c.mailboxServerLocatorAverageLatency,
prometheus.GaugeValue,
c.msToSec(instance.MailboxServerLocatorAverageLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.AverageAuthenticationLatency,
c.averageAuthenticationLatency,
prometheus.GaugeValue,
instance.AverageAuthenticationLatency,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.AverageCASProcessingLatency,
c.averageCASProcessingLatency,
prometheus.GaugeValue,
c.msToSec(instance.AverageCASProcessingLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.MailboxServerProxyFailureRate,
c.mailboxServerProxyFailureRate,
prometheus.GaugeValue,
instance.MailboxServerProxyFailureRate,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.OutstandingProxyRequests,
c.outstandingProxyRequests,
prometheus.GaugeValue,
instance.OutstandingProxyRequests,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ProxyRequestsPerSec,
c.proxyRequestsPerSec,
prometheus.CounterValue,
instance.ProxyRequestsPerSec,
labelName,
@@ -403,13 +420,13 @@ func (c *collector) collectHTTPProxy(ctx *types.ScrapeContext, ch chan<- prometh
return nil
}
// Perflib: [24618] MSExchange OWA
// Perflib: [24618] MSExchange OWA.
type perflibOWA struct {
CurrentUniqueUsers float64 `perflib:"Current Unique Users"`
RequestsPerSec float64 `perflib:"Requests/sec"`
}
func (c *collector) collectOWA(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectOWA(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibOWA
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange OWA"], &data, c.logger); err != nil {
return err
@@ -417,12 +434,12 @@ func (c *collector) collectOWA(ctx *types.ScrapeContext, ch chan<- prometheus.Me
for _, owa := range data {
ch <- prometheus.MustNewConstMetric(
c.CurrentUniqueUsers,
c.currentUniqueUsers,
prometheus.GaugeValue,
owa.CurrentUniqueUsers,
)
ch <- prometheus.MustNewConstMetric(
c.OWARequestsPerSec,
c.owaRequestsPerSec,
prometheus.CounterValue,
owa.RequestsPerSec,
)
@@ -430,14 +447,14 @@ func (c *collector) collectOWA(ctx *types.ScrapeContext, ch chan<- prometheus.Me
return nil
}
// Perflib: [25138] MSExchange ActiveSync
// Perflib: [25138] MSExchange ActiveSync.
type perflibActiveSync struct {
RequestsPerSec float64 `perflib:"Requests/sec"`
PingCommandsPending float64 `perflib:"Ping Commands Pending"`
SyncCommandsPerSec float64 `perflib:"Sync Commands/sec"`
}
func (c *collector) collectActiveSync(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectActiveSync(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibActiveSync
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange ActiveSync"], &data, c.logger); err != nil {
return err
@@ -445,17 +462,17 @@ func (c *collector) collectActiveSync(ctx *types.ScrapeContext, ch chan<- promet
for _, instance := range data {
ch <- prometheus.MustNewConstMetric(
c.ActiveSyncRequestsPerSec,
c.activeSyncRequestsPerSec,
prometheus.CounterValue,
instance.RequestsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.PingCommandsPending,
c.pingCommandsPending,
prometheus.GaugeValue,
instance.PingCommandsPending,
)
ch <- prometheus.MustNewConstMetric(
c.SyncCommandsPerSec,
c.syncCommandsPerSec,
prometheus.CounterValue,
instance.SyncCommandsPerSec,
)
@@ -463,7 +480,7 @@ func (c *collector) collectActiveSync(ctx *types.ScrapeContext, ch chan<- promet
return nil
}
// Perflib: [29366] MSExchange RpcClientAccess
// Perflib: [29366] MSExchange RpcClientAccess.
type perflibRPCClientAccess struct {
RPCAveragedLatency float64 `perflib:"RPC Averaged Latency"`
RPCRequests float64 `perflib:"RPC Requests"`
@@ -473,7 +490,7 @@ type perflibRPCClientAccess struct {
UserCount float64 `perflib:"User Count"`
}
func (c *collector) collectRPC(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectRPC(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibRPCClientAccess
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange RpcClientAccess"], &data, c.logger); err != nil {
return err
@@ -481,32 +498,32 @@ func (c *collector) collectRPC(ctx *types.ScrapeContext, ch chan<- prometheus.Me
for _, rpc := range data {
ch <- prometheus.MustNewConstMetric(
c.RPCAveragedLatency,
c.rpcAveragedLatency,
prometheus.GaugeValue,
c.msToSec(rpc.RPCAveragedLatency),
)
ch <- prometheus.MustNewConstMetric(
c.RPCRequests,
c.rpcRequests,
prometheus.GaugeValue,
rpc.RPCRequests,
)
ch <- prometheus.MustNewConstMetric(
c.ActiveUserCount,
c.activeUserCount,
prometheus.GaugeValue,
rpc.ActiveUserCount,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionCount,
c.connectionCount,
prometheus.GaugeValue,
rpc.ConnectionCount,
)
ch <- prometheus.MustNewConstMetric(
c.RPCOperationsPerSec,
c.rpcOperationsPerSec,
prometheus.CounterValue,
rpc.RPCOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.UserCount,
c.userCount,
prometheus.GaugeValue,
rpc.UserCount,
)
@@ -515,7 +532,7 @@ func (c *collector) collectRPC(ctx *types.ScrapeContext, ch chan<- prometheus.Me
return nil
}
// Perflib: [20524] MSExchangeTransport Queues
// Perflib: [20524] MSExchangeTransport Queues.
type perflibTransportQueues struct {
Name string
@@ -529,7 +546,7 @@ type perflibTransportQueues struct {
PoisonQueueLength float64 `perflib:"Poison Queue Length"`
}
func (c *collector) collectTransportQueues(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectTransportQueues(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibTransportQueues
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchangeTransport Queues"], &data, c.logger); err != nil {
return err
@@ -541,49 +558,49 @@ func (c *collector) collectTransportQueues(ctx *types.ScrapeContext, ch chan<- p
continue
}
ch <- prometheus.MustNewConstMetric(
c.ExternalActiveRemoteDeliveryQueueLength,
c.externalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
queue.ExternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.InternalActiveRemoteDeliveryQueueLength,
c.internalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
queue.InternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ActiveMailboxDeliveryQueueLength,
c.activeMailboxDeliveryQueueLength,
prometheus.GaugeValue,
queue.ActiveMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.RetryMailboxDeliveryQueueLength,
c.retryMailboxDeliveryQueueLength,
prometheus.GaugeValue,
queue.RetryMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.UnreachableQueueLength,
c.unreachableQueueLength,
prometheus.GaugeValue,
queue.UnreachableQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ExternalLargestDeliveryQueueLength,
c.externalLargestDeliveryQueueLength,
prometheus.GaugeValue,
queue.ExternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.InternalLargestDeliveryQueueLength,
c.internalLargestDeliveryQueueLength,
prometheus.GaugeValue,
queue.InternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.PoisonQueueLength,
c.poisonQueueLength,
prometheus.GaugeValue,
queue.PoisonQueueLength,
labelName,
@@ -592,7 +609,7 @@ func (c *collector) collectTransportQueues(ctx *types.ScrapeContext, ch chan<- p
return nil
}
// Perflib: [19430] MSExchange WorkloadManagement Workloads
// Perflib: [19430] MSExchange WorkloadManagement Workloads.
type perflibWorkloadManagementWorkloads struct {
Name string
@@ -603,7 +620,7 @@ type perflibWorkloadManagementWorkloads struct {
IsActive float64 `perflib:"Active"`
}
func (c *collector) collectWorkloadManagementWorkloads(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectWorkloadManagementWorkloads(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibWorkloadManagementWorkloads
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange WorkloadManagement Workloads"], &data, c.logger); err != nil {
return err
@@ -615,31 +632,31 @@ func (c *collector) collectWorkloadManagementWorkloads(ctx *types.ScrapeContext,
continue
}
ch <- prometheus.MustNewConstMetric(
c.ActiveTasks,
c.activeTasks,
prometheus.GaugeValue,
instance.ActiveTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.CompletedTasks,
c.completedTasks,
prometheus.CounterValue,
instance.CompletedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.QueuedTasks,
c.queuedTasks,
prometheus.CounterValue,
instance.QueuedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.YieldedTasks,
c.yieldedTasks,
prometheus.CounterValue,
instance.YieldedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.IsActive,
c.isActive,
prometheus.GaugeValue,
instance.IsActive,
labelName,
@@ -649,19 +666,19 @@ func (c *collector) collectWorkloadManagementWorkloads(ctx *types.ScrapeContext,
return nil
}
// [29240] MSExchangeAutodiscover
// [29240] MSExchangeAutodiscover.
type perflibAutodiscover struct {
RequestsPerSec float64 `perflib:"Requests/sec"`
}
func (c *collector) collectAutoDiscover(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectAutoDiscover(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibAutodiscover
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchangeAutodiscover"], &data, c.logger); err != nil {
return err
}
for _, autodisc := range data {
ch <- prometheus.MustNewConstMetric(
c.AutodiscoverRequestsPerSec,
c.autoDiscoverRequestsPerSec,
prometheus.CounterValue,
autodisc.RequestsPerSec,
)
@@ -669,12 +686,12 @@ func (c *collector) collectAutoDiscover(ctx *types.ScrapeContext, ch chan<- prom
return nil
}
// perflib [26463] MSExchange MapiHttp Emsmdb
// perflib [26463] MSExchange MapiHttp Emsmdb.
type perflibMapiHttpEmsmdb struct {
ActiveUserCount float64 `perflib:"Active User Count"`
}
func (c *collector) collectMapiHttpEmsmdb(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectMapiHttpEmsmdb(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibMapiHttpEmsmdb
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange MapiHttp Emsmdb"], &data, c.logger); err != nil {
return err
@@ -682,7 +699,7 @@ func (c *collector) collectMapiHttpEmsmdb(ctx *types.ScrapeContext, ch chan<- pr
for _, mapihttp := range data {
ch <- prometheus.MustNewConstMetric(
c.ActiveUserCountMapiHttpEmsmdb,
c.activeUserCountMapiHttpEmsMDB,
prometheus.GaugeValue,
mapihttp.ActiveUserCount,
)
@@ -691,14 +708,14 @@ func (c *collector) collectMapiHttpEmsmdb(ctx *types.ScrapeContext, ch chan<- pr
return nil
}
// toLabelName converts strings to lowercase and replaces all whitespaces and dots with underscores
func (c *collector) toLabelName(name string) string {
// toLabelName converts strings to lowercase and replaces all whitespaces and dots with underscores.
func (c *Collector) toLabelName(name string) string {
s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")
s = strings.ReplaceAll(s, "__", "_")
return s
}
// msToSec converts from ms to seconds
func (c *collector) msToSec(t float64) float64 {
// msToSec converts from ms to seconds.
func (c *Collector) msToSec(t float64) float64 {
return t / 1000
}
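The two helpers above are pure functions, so their effect on the exported name label can be shown with a minimal standalone sketch (the instance name below is illustrative; only the Go standard library is assumed):

package main

import (
	"fmt"
	"strings"
)

// Mirrors toLabelName above: lowercase, collapse whitespace to underscores, map dots to underscores.
func toLabelName(name string) string {
	s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")
	return strings.ReplaceAll(s, "__", "_")
}

func main() {
	fmt.Println(toLabelName("MSExchange OWA .NET")) // msexchange_owa_net
	fmt.Println(3500.0 / 1000)                      // msToSec: 3500 ms -> 3.5 s
}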

View File

@@ -3,13 +3,12 @@
package fsrmquota
import (
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
@@ -19,94 +18,106 @@ type Config struct{}
var ConfigDefaults = Config{}
type collector struct {
type Collector struct {
config Config
logger log.Logger
QuotasCount *prometheus.Desc
Path *prometheus.Desc
PeakUsage *prometheus.Desc
Size *prometheus.Desc
Usage *prometheus.Desc
quotasCount *prometheus.Desc
peakUsage *prometheus.Desc
size *prometheus.Desc
usage *prometheus.Desc
Description *prometheus.Desc
Disabled *prometheus.Desc
MatchesTemplate *prometheus.Desc
SoftLimit *prometheus.Desc
Template *prometheus.Desc
description *prometheus.Desc
disabled *prometheus.Desc
matchesTemplate *prometheus.Desc
softLimit *prometheus.Desc
template *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.QuotasCount = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.quotasCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "count"),
"Number of Quotas",
nil,
nil,
)
c.PeakUsage = prometheus.NewDesc(
c.peakUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "peak_usage_bytes"),
"The highest amount of disk space usage charged to this quota. (PeakUsage)",
[]string{"path", "template"},
nil,
)
c.Size = prometheus.NewDesc(
c.size = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "size_bytes"),
"The size of the quota. (Size)",
[]string{"path", "template"},
nil,
)
c.Usage = prometheus.NewDesc(
c.usage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "usage_bytes"),
"The current amount of disk space usage charged to this quota. (Usage)",
[]string{"path", "template"},
nil,
)
c.Description = prometheus.NewDesc(
c.description = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "description"),
"Description of the quota (Description)",
[]string{"path", "template", "description"},
nil,
)
c.Disabled = prometheus.NewDesc(
c.disabled = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "disabled"),
"If 1, the quota is disabled. The default value is 0. (Disabled)",
[]string{"path", "template"},
nil,
)
c.SoftLimit = prometheus.NewDesc(
c.softLimit = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "softlimit"),
"If 1, the quota is a soft limit. If 0, the quota is a hard limit. The default value is 0. Optional (SoftLimit)",
[]string{"path", "template"},
nil,
)
c.Template = prometheus.NewDesc(
c.template = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "template"),
"Quota template name. (Template)",
[]string{"path", "template"},
nil,
)
c.MatchesTemplate = prometheus.NewDesc(
c.matchesTemplate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "matchestemplate"),
"If 1, the property values of this quota match those values of the template from which it was derived. (MatchesTemplate)",
[]string{"path", "template"},
@@ -117,7 +128,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting fsrmquota metrics", "err", err)
return err
@@ -142,7 +153,7 @@ type MSFT_FSRMQuota struct {
SoftLimit bool
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []MSFT_FSRMQuota
q := wmi.QueryAll(&dst, c.logger)
@@ -153,55 +164,54 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
for _, quota := range dst {
count++
path := quota.Path
template := quota.Template
Description := quota.Description
ch <- prometheus.MustNewConstMetric(
c.PeakUsage,
c.peakUsage,
prometheus.GaugeValue,
float64(quota.PeakUsage),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.Size,
c.size,
prometheus.GaugeValue,
float64(quota.Size),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.Usage,
c.usage,
prometheus.GaugeValue,
float64(quota.Usage),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.Description,
c.description,
prometheus.GaugeValue,
1.0,
path, template, Description,
)
ch <- prometheus.MustNewConstMetric(
c.Disabled,
c.disabled,
prometheus.GaugeValue,
utils.BoolToFloat(quota.Disabled),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.MatchesTemplate,
c.matchesTemplate,
prometheus.GaugeValue,
utils.BoolToFloat(quota.MatchesTemplate),
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.SoftLimit,
c.softLimit,
prometheus.GaugeValue,
utils.BoolToFloat(quota.SoftLimit),
path,
@@ -210,7 +220,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.QuotasCount,
c.quotasCount,
prometheus.GaugeValue,
float64(count),
)
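Construction follows the same shape across the refactored collectors: passing a nil config falls back to ConfigDefaults inside New. A small usage sketch (the go-kit logger setup is only one possible choice, and the import path is assumed from the package layout):

package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/prometheus-community/windows_exporter/pkg/collector/fsrmquota"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	c := fsrmquota.New(logger, nil)   // nil selects ConfigDefaults, as in New above
	if err := c.Build(); err != nil { // Build creates the prometheus.Desc values
		panic(err)
	}
}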

View File

@@ -11,7 +11,6 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/collectors/version"
@@ -20,7 +19,7 @@ import (
func (c *Collectors) BuildServeHTTP(disableExporterMetrics bool, timeoutMargin float64) http.HandlerFunc {
collectorFactory := func(timeout time.Duration, requestedCollectors []string) (error, *Prometheus) {
filteredCollectors := make(map[string]types.Collector)
filteredCollectors := make(map[string]Collector)
// scrape all enabled collectors if no collector is requested
if len(requestedCollectors) == 0 {
filteredCollectors = c.collectors
@@ -56,7 +55,7 @@ func (c *Collectors) BuildServeHTTP(disableExporterMetrics bool, timeoutMargin f
if timeoutSeconds == 0 {
timeoutSeconds = defaultTimeout
}
timeoutSeconds = timeoutSeconds - timeoutMargin
timeoutSeconds -= timeoutMargin
reg := prometheus.NewRegistry()
err, wc := collectorFactory(time.Duration(timeoutSeconds*float64(time.Second)), r.URL.Query()["collect[]"])
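The handler built here filters collectors per scrape via the repeated collect[] query parameter. A sketch of how a client could assemble such a URL (host and port are placeholders; 9182 is only the exporter's customary default):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Add("collect[]", "cpu")          // repeat the key once per requested collector,
	q.Add("collect[]", "logical_disk") // matching r.URL.Query()["collect[]"] above

	u := url.URL{Scheme: "http", Host: "localhost:9182", Path: "/metrics", RawQuery: q.Encode()}
	fmt.Println(u.String()) // http://localhost:9182/metrics?collect%5B%5D=cpu&collect%5B%5D=logical_disk
}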

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -6,6 +6,8 @@ import (
)
func TestIISDeduplication(t *testing.T) {
t.Parallel()
start := []perflibAPP_POOL_WAS{
{
Name: "foo",

View File

@@ -6,10 +6,9 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus-community/windows_exporter/pkg/headers/slc"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
)
const Name = "license"
@@ -26,37 +25,50 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_DNS_DNS metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.
type Collector struct {
config Config
logger log.Logger
LicenseStatus *prometheus.Desc
licenseStatus *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.LicenseStatus = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.licenseStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "status"),
"Status of windows license",
[]string{"state"},
@@ -68,7 +80,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting license metrics", "err", err)
return err
@@ -76,7 +88,7 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
return nil
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
status, err := slc.SLIsWindowsGenuineLocal()
if err != nil {
return err
@@ -88,7 +100,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
val = 1.0
}
ch <- prometheus.MustNewConstMetric(c.LicenseStatus, prometheus.GaugeValue, val, v)
ch <- prometheus.MustNewConstMetric(c.licenseStatus, prometheus.GaugeValue, val, v)
}
return nil

View File

@@ -10,61 +10,50 @@ import (
"strconv"
"strings"
"golang.org/x/sys/windows"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
)
const (
Name = "logical_disk"
FlagLogicalDiskVolumeExclude = "collector.logical_disk.volume-exclude"
FlagLogicalDiskVolumeInclude = "collector.logical_disk.volume-include"
)
const Name = "logical_disk"
type Config struct {
VolumeInclude string `yaml:"volume_include"`
VolumeExclude string `yaml:"volume_exclude"`
VolumeInclude *regexp.Regexp `yaml:"volume_include"`
VolumeExclude *regexp.Regexp `yaml:"volume_exclude"`
}
var ConfigDefaults = Config{
VolumeInclude: ".+",
VolumeExclude: "",
VolumeInclude: types.RegExpAny,
VolumeExclude: types.RegExpEmpty,
}
// A collector is a Prometheus collector for perflib logicalDisk metrics
type collector struct {
// A Collector is a Prometheus Collector for perflib logicalDisk metrics.
type Collector struct {
config Config
logger log.Logger
volumeInclude *string
volumeExclude *string
Information *prometheus.Desc
ReadOnly *prometheus.Desc
RequestsQueued *prometheus.Desc
AvgReadQueue *prometheus.Desc
AvgWriteQueue *prometheus.Desc
ReadBytesTotal *prometheus.Desc
ReadsTotal *prometheus.Desc
WriteBytesTotal *prometheus.Desc
WritesTotal *prometheus.Desc
ReadTime *prometheus.Desc
WriteTime *prometheus.Desc
TotalSpace *prometheus.Desc
FreeSpace *prometheus.Desc
IdleTime *prometheus.Desc
SplitIOs *prometheus.Desc
ReadLatency *prometheus.Desc
WriteLatency *prometheus.Desc
ReadWriteLatency *prometheus.Desc
volumeIncludePattern *regexp.Regexp
volumeExcludePattern *regexp.Regexp
avgReadQueue *prometheus.Desc
avgWriteQueue *prometheus.Desc
freeSpace *prometheus.Desc
idleTime *prometheus.Desc
information *prometheus.Desc
readBytesTotal *prometheus.Desc
readLatency *prometheus.Desc
readOnly *prometheus.Desc
readsTotal *prometheus.Desc
readTime *prometheus.Desc
readWriteLatency *prometheus.Desc
requestsQueued *prometheus.Desc
splitIOs *prometheus.Desc
totalSpace *prometheus.Desc
writeBytesTotal *prometheus.Desc
writeLatency *prometheus.Desc
writesTotal *prometheus.Desc
writeTime *prometheus.Desc
}
type volumeInfo struct {
@@ -75,188 +64,211 @@ type volumeInfo struct {
readonly float64
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
volumeExclude: &config.VolumeExclude,
volumeInclude: &config.VolumeInclude,
if config.VolumeExclude == nil {
config.VolumeExclude = ConfigDefaults.VolumeExclude
}
if config.VolumeInclude == nil {
config.VolumeInclude = ConfigDefaults.VolumeInclude
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
c := &collector{
volumeInclude: app.Flag(
FlagLogicalDiskVolumeInclude,
"Regexp of volumes to include. Volume name must both match include and not match exclude to be included.",
).Default(ConfigDefaults.VolumeInclude).String(),
volumeExclude: app.Flag(
FlagLogicalDiskVolumeExclude,
"Regexp of volumes to exclude. Volume name must both match include and not match exclude to be included.",
).Default(ConfigDefaults.VolumeExclude).String(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
var volumeExclude, volumeInclude string
app.Flag(
"collector.logical_disk.volume-exclude",
"Regexp of volumes to exclude. Volume name must both match include and not match exclude to be included.",
).Default(c.config.VolumeExclude.String()).StringVar(&volumeExclude)
app.Flag(
"collector.logical_disk.volume-include",
"Regexp of volumes to include. Volume name must both match include and not match exclude to be included.",
).Default(c.config.VolumeInclude.String()).StringVar(&volumeInclude)
app.Action(func(*kingpin.ParseContext) error {
var err error
c.config.VolumeExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", volumeExclude))
if err != nil {
return fmt.Errorf("collector.logical_disk.volume-exclude: %w", err)
}
c.config.VolumeInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", volumeInclude))
if err != nil {
return fmt.Errorf("collector.logical_disk.volume-include: %w", err)
}
return nil
})
return c
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"LogicalDisk"}, nil
}
func (c *collector) Build() error {
c.Information = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.information = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"A metric with a constant '1' value labeled with logical disk information",
[]string{"disk", "type", "volume", "volume_name", "filesystem", "serial_number"},
nil,
)
c.ReadOnly = prometheus.NewDesc(
c.readOnly = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "readonly"),
"Whether the logical disk is read-only",
[]string{"volume"},
nil,
)
c.RequestsQueued = prometheus.NewDesc(
c.requestsQueued = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
"The number of requests queued to the disk (LogicalDisk.CurrentDiskQueueLength)",
[]string{"volume"},
nil,
)
c.AvgReadQueue = prometheus.NewDesc(
c.avgReadQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "avg_read_requests_queued"),
"Average number of read requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskReadQueueLength)",
[]string{"volume"},
nil,
)
c.AvgWriteQueue = prometheus.NewDesc(
c.avgWriteQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "avg_write_requests_queued"),
"Average number of write requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskWriteQueueLength)",
[]string{"volume"},
nil,
)
c.ReadBytesTotal = prometheus.NewDesc(
c.readBytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_bytes_total"),
"The number of bytes transferred from the disk during read operations (LogicalDisk.DiskReadBytesPerSec)",
[]string{"volume"},
nil,
)
c.ReadsTotal = prometheus.NewDesc(
c.readsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "reads_total"),
"The number of read operations on the disk (LogicalDisk.DiskReadsPerSec)",
[]string{"volume"},
nil,
)
c.WriteBytesTotal = prometheus.NewDesc(
c.writeBytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_bytes_total"),
"The number of bytes transferred to the disk during write operations (LogicalDisk.DiskWriteBytesPerSec)",
[]string{"volume"},
nil,
)
c.WritesTotal = prometheus.NewDesc(
c.writesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "writes_total"),
"The number of write operations on the disk (LogicalDisk.DiskWritesPerSec)",
[]string{"volume"},
nil,
)
c.ReadTime = prometheus.NewDesc(
c.readTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_seconds_total"),
"Seconds that the disk was busy servicing read requests (LogicalDisk.PercentDiskReadTime)",
[]string{"volume"},
nil,
)
c.WriteTime = prometheus.NewDesc(
c.writeTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_seconds_total"),
"Seconds that the disk was busy servicing write requests (LogicalDisk.PercentDiskWriteTime)",
[]string{"volume"},
nil,
)
c.FreeSpace = prometheus.NewDesc(
c.freeSpace = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "free_bytes"),
"Free space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace)",
[]string{"volume"},
nil,
)
c.TotalSpace = prometheus.NewDesc(
c.totalSpace = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "size_bytes"),
"Total space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace_Base)",
[]string{"volume"},
nil,
)
c.IdleTime = prometheus.NewDesc(
c.idleTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "idle_seconds_total"),
"Seconds that the disk was idle (LogicalDisk.PercentIdleTime)",
[]string{"volume"},
nil,
)
c.SplitIOs = prometheus.NewDesc(
c.splitIOs = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "split_ios_total"),
"The number of I/Os to the disk were split into multiple I/Os (LogicalDisk.SplitIOPerSec)",
[]string{"volume"},
nil,
)
c.ReadLatency = prometheus.NewDesc(
c.readLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_latency_seconds_total"),
"Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead)",
[]string{"volume"},
nil,
)
c.WriteLatency = prometheus.NewDesc(
c.writeLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_latency_seconds_total"),
"Shows the average time, in seconds, of a write operation to the disk (LogicalDisk.AvgDiskSecPerWrite)",
[]string{"volume"},
nil,
)
c.ReadWriteLatency = prometheus.NewDesc(
c.readWriteLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_write_latency_seconds_total"),
"Shows the time, in seconds, of the average disk transfer (LogicalDisk.AvgDiskSecPerTransfer)",
[]string{"volume"},
nil,
)
var err error
c.volumeIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.volumeInclude))
if err != nil {
return err
}
c.volumeExcludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.volumeExclude))
if err != nil {
return err
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting logical_disk metrics", "err", err)
return err
@@ -266,7 +278,7 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
// Win32_PerfRawData_PerfDisk_LogicalDisk docs:
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference.
type logicalDisk struct {
Name string
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
@@ -287,7 +299,7 @@ type logicalDisk struct {
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var (
err error
diskID string
@@ -301,8 +313,8 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
for _, volume := range dst {
if volume.Name == "_Total" ||
c.volumeExcludePattern.MatchString(volume.Name) ||
!c.volumeIncludePattern.MatchString(volume.Name) {
c.config.VolumeExclude.MatchString(volume.Name) ||
!c.config.VolumeInclude.MatchString(volume.Name) {
continue
}
@@ -317,7 +329,7 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
}
ch <- prometheus.MustNewConstMetric(
c.Information,
c.information,
prometheus.GaugeValue,
1,
diskID,
@@ -329,112 +341,112 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
)
ch <- prometheus.MustNewConstMetric(
c.RequestsQueued,
c.requestsQueued,
prometheus.GaugeValue,
volume.CurrentDiskQueueLength,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.AvgReadQueue,
c.avgReadQueue,
prometheus.GaugeValue,
volume.AvgDiskReadQueueLength*perflib.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.AvgWriteQueue,
c.avgWriteQueue,
prometheus.GaugeValue,
volume.AvgDiskWriteQueueLength*perflib.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadBytesTotal,
c.readBytesTotal,
prometheus.CounterValue,
volume.DiskReadBytesPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadsTotal,
c.readsTotal,
prometheus.CounterValue,
volume.DiskReadsPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WriteBytesTotal,
c.writeBytesTotal,
prometheus.CounterValue,
volume.DiskWriteBytesPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WritesTotal,
c.writesTotal,
prometheus.CounterValue,
volume.DiskWritesPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadTime,
c.readTime,
prometheus.CounterValue,
volume.PercentDiskReadTime,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WriteTime,
c.writeTime,
prometheus.CounterValue,
volume.PercentDiskWriteTime,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FreeSpace,
c.freeSpace,
prometheus.GaugeValue,
volume.PercentFreeSpace_Base*1024*1024,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalSpace,
c.totalSpace,
prometheus.GaugeValue,
volume.PercentFreeSpace*1024*1024,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.IdleTime,
c.idleTime,
prometheus.CounterValue,
volume.PercentIdleTime,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.SplitIOs,
c.splitIOs,
prometheus.CounterValue,
volume.SplitIOPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadLatency,
c.readLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerRead*perflib.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WriteLatency,
c.writeLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerWrite*perflib.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ReadWriteLatency,
c.readWriteLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerTransfer*perflib.TicksToSecondScaleFactor,
volume.Name,
@@ -480,7 +492,6 @@ func getDiskIDByVolume(rootDrive string) (string, error) {
f, err = windows.CreateFile(
windows.StringToUTF16Ptr(`\\.\`+rootDrive),
0, mode, nil, windows.OPEN_EXISTING, uint32(windows.FILE_ATTRIBUTE_READONLY), 0)
if err != nil {
return "", err
}
@@ -529,7 +540,6 @@ func getVolumeInfo(rootDrive string) (volumeInfo, error) {
err := windows.GetVolumeInformation(volPath, &volBufLabel[0], uint32(len(volBufLabel)),
&volSerialNum, nil, &fsFlags, &volBufType[0], uint32(len(volBufType)))
if err != nil {
if driveType != windows.DRIVE_CDROM && driveType != windows.DRIVE_REMOVABLE {
return volumeInfo{}, err
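The logical_disk changes move regexp handling from Build into the kingpin Action above, anchoring the user-supplied pattern with ^(?:...)$ so it must match the whole volume name. A self-contained sketch of that anchoring (volume names are illustrative):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	userPattern := "C:|D:" // e.g. a value passed to --collector.logical_disk.volume-include

	// Same anchoring as in NewWithFlags above.
	include := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", userPattern))

	for _, vol := range []string{"C:", "D:", "HarddiskVolume1"} {
		fmt.Println(vol, include.MatchString(vol)) // C: true, D: true, HarddiskVolume1 false
	}
}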

View File

@@ -9,8 +9,8 @@ import (
)
func BenchmarkCollector(b *testing.B) {
// Whitelist is not set in testing context (kingpin flags not parsed), causing the collector to skip all disks.
// Whitelist is not set in testing context (kingpin flags not parsed), causing the Collector to skip all disks.
localVolumeInclude := ".+"
kingpin.CommandLine.GetArg(logical_disk.FlagLogicalDiskVolumeInclude).StringVar(&localVolumeInclude)
kingpin.CommandLine.GetArg("collector.logical_disk.volume-include").StringVar(&localVolumeInclude)
testutils.FuncBenchmarkCollector(b, "logical_disk", logical_disk.NewWithFlags)
}

View File

@@ -5,12 +5,11 @@ package logon
import (
"errors"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
@@ -20,37 +19,50 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI metrics.
type Collector struct {
config Config
logger log.Logger
LogonType *prometheus.Desc
logonType *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.LogonType = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.logonType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "logon_type"),
"Number of active logon sessions (LogonSession.LogonType)",
[]string{"status"},
@@ -61,7 +73,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting user metrics", "err", err)
return err
@@ -75,7 +87,7 @@ type Win32_LogonSession struct {
LogonType uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_LogonSession
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -132,91 +144,91 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(system),
"system",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(interactive),
"interactive",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(network),
"network",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(batch),
"batch",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(service),
"service",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(proxy),
"proxy",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(unlock),
"unlock",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(networkcleartext),
"network_clear_text",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(newcredentials),
"new_credentials",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(remoteinteractive),
"remote_interactive",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(cachedinteractive),
"cached_interactive",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(remoteinteractive),
"cached_remote_interactive",
)
ch <- prometheus.MustNewConstMetric(
c.LogonType,
c.logonType,
prometheus.GaugeValue,
float64(cachedunlock),
"cached_unlock",

View File

@@ -8,6 +8,6 @@ import (
)
func BenchmarkCollector(b *testing.B) {
// No context name required as collector source is WMI
// No context name required as Collector source is WMI
testutils.FuncBenchmarkCollector(b, logon.Name, logon.NewWithFlags)
}

View File

@@ -57,70 +57,68 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/time"
"github.com/prometheus-community/windows_exporter/pkg/collector/vmware"
"github.com/prometheus-community/windows_exporter/pkg/collector/vmware_blast"
"github.com/prometheus-community/windows_exporter/pkg/types"
"golang.org/x/exp/maps"
)
var Map = map[string]types.CollectorBuilderWithFlags{
ad.Name: ad.NewWithFlags,
adcs.Name: adcs.NewWithFlags,
adfs.Name: adfs.NewWithFlags,
cache.Name: cache.NewWithFlags,
container.Name: container.NewWithFlags,
cpu.Name: cpu.NewWithFlags,
cpu_info.Name: cpu_info.NewWithFlags,
cs.Name: cs.NewWithFlags,
dfsr.Name: dfsr.NewWithFlags,
dhcp.Name: dhcp.NewWithFlags,
diskdrive.Name: diskdrive.NewWithFlags,
dns.Name: dns.NewWithFlags,
exchange.Name: exchange.NewWithFlags,
fsrmquota.Name: fsrmquota.NewWithFlags,
hyperv.Name: hyperv.NewWithFlags,
iis.Name: iis.NewWithFlags,
license.Name: license.NewWithFlags,
logical_disk.Name: logical_disk.NewWithFlags,
logon.Name: logon.NewWithFlags,
memory.Name: memory.NewWithFlags,
mscluster_cluster.Name: mscluster_cluster.NewWithFlags,
mscluster_network.Name: mscluster_network.NewWithFlags,
mscluster_node.Name: mscluster_node.NewWithFlags,
mscluster_resource.Name: mscluster_resource.NewWithFlags,
mscluster_resourcegroup.Name: mscluster_resourcegroup.NewWithFlags,
msmq.Name: msmq.NewWithFlags,
mssql.Name: mssql.NewWithFlags,
net.Name: net.NewWithFlags,
netframework_clrexceptions.Name: netframework_clrexceptions.NewWithFlags,
netframework_clrinterop.Name: netframework_clrinterop.NewWithFlags,
netframework_clrjit.Name: netframework_clrjit.NewWithFlags,
netframework_clrloading.Name: netframework_clrloading.NewWithFlags,
netframework_clrlocksandthreads.Name: netframework_clrlocksandthreads.NewWithFlags,
netframework_clrmemory.Name: netframework_clrmemory.NewWithFlags,
netframework_clrremoting.Name: netframework_clrremoting.NewWithFlags,
netframework_clrsecurity.Name: netframework_clrsecurity.NewWithFlags,
nps.Name: nps.NewWithFlags,
os.Name: os.NewWithFlags,
physical_disk.Name: physical_disk.NewWithFlags,
printer.Name: printer.NewWithFlags,
process.Name: process.NewWithFlags,
remote_fx.Name: remote_fx.NewWithFlags,
scheduled_task.Name: scheduled_task.NewWithFlags,
service.Name: service.NewWithFlags,
smb.Name: smb.NewWithFlags,
smbclient.Name: smbclient.NewWithFlags,
smtp.Name: smtp.NewWithFlags,
system.Name: system.NewWithFlags,
teradici_pcoip.Name: teradici_pcoip.NewWithFlags,
tcp.Name: tcp.NewWithFlags,
terminal_services.Name: terminal_services.NewWithFlags,
textfile.Name: textfile.NewWithFlags,
thermalzone.Name: thermalzone.NewWithFlags,
time.Name: time.NewWithFlags,
vmware.Name: vmware.NewWithFlags,
vmware_blast.Name: vmware_blast.NewWithFlags,
var BuildersWithFlags = map[string]BuilderWithFlags[Collector]{
ad.Name: NewBuilderWithFlags(ad.NewWithFlags),
adcs.Name: NewBuilderWithFlags(adcs.NewWithFlags),
adfs.Name: NewBuilderWithFlags(adfs.NewWithFlags),
cache.Name: NewBuilderWithFlags(cache.NewWithFlags),
container.Name: NewBuilderWithFlags(container.NewWithFlags),
cpu.Name: NewBuilderWithFlags(cpu.NewWithFlags),
cpu_info.Name: NewBuilderWithFlags(cpu_info.NewWithFlags),
cs.Name: NewBuilderWithFlags(cs.NewWithFlags),
dfsr.Name: NewBuilderWithFlags(dfsr.NewWithFlags),
dhcp.Name: NewBuilderWithFlags(dhcp.NewWithFlags),
diskdrive.Name: NewBuilderWithFlags(diskdrive.NewWithFlags),
dns.Name: NewBuilderWithFlags(dns.NewWithFlags),
exchange.Name: NewBuilderWithFlags(exchange.NewWithFlags),
fsrmquota.Name: NewBuilderWithFlags(fsrmquota.NewWithFlags),
hyperv.Name: NewBuilderWithFlags(hyperv.NewWithFlags),
iis.Name: NewBuilderWithFlags(iis.NewWithFlags),
license.Name: NewBuilderWithFlags(license.NewWithFlags),
logical_disk.Name: NewBuilderWithFlags(logical_disk.NewWithFlags),
logon.Name: NewBuilderWithFlags(logon.NewWithFlags),
memory.Name: NewBuilderWithFlags(memory.NewWithFlags),
mscluster_cluster.Name: NewBuilderWithFlags(mscluster_cluster.NewWithFlags),
mscluster_network.Name: NewBuilderWithFlags(mscluster_network.NewWithFlags),
mscluster_node.Name: NewBuilderWithFlags(mscluster_node.NewWithFlags),
mscluster_resource.Name: NewBuilderWithFlags(mscluster_resource.NewWithFlags),
mscluster_resourcegroup.Name: NewBuilderWithFlags(mscluster_resourcegroup.NewWithFlags),
msmq.Name: NewBuilderWithFlags(msmq.NewWithFlags),
mssql.Name: NewBuilderWithFlags(mssql.NewWithFlags),
net.Name: NewBuilderWithFlags(net.NewWithFlags),
netframework_clrexceptions.Name: NewBuilderWithFlags(netframework_clrexceptions.NewWithFlags),
netframework_clrinterop.Name: NewBuilderWithFlags(netframework_clrinterop.NewWithFlags),
netframework_clrjit.Name: NewBuilderWithFlags(netframework_clrjit.NewWithFlags),
netframework_clrloading.Name: NewBuilderWithFlags(netframework_clrloading.NewWithFlags),
netframework_clrlocksandthreads.Name: NewBuilderWithFlags(netframework_clrlocksandthreads.NewWithFlags),
netframework_clrmemory.Name: NewBuilderWithFlags(netframework_clrmemory.NewWithFlags),
netframework_clrremoting.Name: NewBuilderWithFlags(netframework_clrremoting.NewWithFlags),
netframework_clrsecurity.Name: NewBuilderWithFlags(netframework_clrsecurity.NewWithFlags),
nps.Name: NewBuilderWithFlags(nps.NewWithFlags),
os.Name: NewBuilderWithFlags(os.NewWithFlags),
physical_disk.Name: NewBuilderWithFlags(physical_disk.NewWithFlags),
printer.Name: NewBuilderWithFlags(printer.NewWithFlags),
process.Name: NewBuilderWithFlags(process.NewWithFlags),
remote_fx.Name: NewBuilderWithFlags(remote_fx.NewWithFlags),
scheduled_task.Name: NewBuilderWithFlags(scheduled_task.NewWithFlags),
service.Name: NewBuilderWithFlags(service.NewWithFlags),
smb.Name: NewBuilderWithFlags(smb.NewWithFlags),
smbclient.Name: NewBuilderWithFlags(smbclient.NewWithFlags),
smtp.Name: NewBuilderWithFlags(smtp.NewWithFlags),
system.Name: NewBuilderWithFlags(system.NewWithFlags),
teradici_pcoip.Name: NewBuilderWithFlags(teradici_pcoip.NewWithFlags),
tcp.Name: NewBuilderWithFlags(tcp.NewWithFlags),
terminal_services.Name: NewBuilderWithFlags(terminal_services.NewWithFlags),
textfile.Name: NewBuilderWithFlags(textfile.NewWithFlags),
thermalzone.Name: NewBuilderWithFlags(thermalzone.NewWithFlags),
time.Name: NewBuilderWithFlags(time.NewWithFlags),
vmware.Name: NewBuilderWithFlags(vmware.NewWithFlags),
vmware_blast.Name: NewBuilderWithFlags(vmware_blast.NewWithFlags),
}
func Available() []string {
return maps.Keys(Map)
return maps.Keys(BuildersWithFlags)
}
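The refactored map wraps every concrete constructor (func(*kingpin.Application) *ad.Collector, *memory.Collector, and so on) in NewBuilderWithFlags so they all share the single value type BuilderWithFlags[Collector]. The exporter's own definitions of that interface and helper are not part of this diff; the following is only a minimal, self-contained sketch of how such a generic adapter can be built, with the Collector interface and demo collector invented for illustration:

package main

import (
	"fmt"

	"github.com/alecthomas/kingpin/v2"
)

// Collector is a stand-in for the exporter's collector interface (assumed shape).
type Collector interface {
	GetName() string
}

// BuilderWithFlags constructs a collector of type C and registers its CLI flags.
type BuilderWithFlags[C Collector] func(*kingpin.Application) C

// NewBuilderWithFlags erases the concrete collector type behind the Collector
// interface so heterogeneous constructors fit into one map.
func NewBuilderWithFlags[C Collector](fn func(*kingpin.Application) C) BuilderWithFlags[Collector] {
	return func(app *kingpin.Application) Collector {
		return fn(app)
	}
}

// demoCollector and newDemoWithFlags are hypothetical, standing in for a real collector package.
type demoCollector struct{ name string }

func (d *demoCollector) GetName() string { return d.name }

func newDemoWithFlags(_ *kingpin.Application) *demoCollector {
	return &demoCollector{name: "demo"}
}

func main() {
	app := kingpin.New("windows_exporter", "sketch")
	builders := map[string]BuilderWithFlags[Collector]{
		"demo": NewBuilderWithFlags(newDemoWithFlags),
	}
	for name, build := range builders {
		fmt.Println(name, build(app).GetName())
	}
}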

View File

@@ -20,266 +20,279 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for perflib Memory metrics
type collector struct {
// A Collector is a Prometheus Collector for perflib Memory metrics.
type Collector struct {
config Config
logger log.Logger
AvailableBytes *prometheus.Desc
CacheBytes *prometheus.Desc
CacheBytesPeak *prometheus.Desc
CacheFaultsTotal *prometheus.Desc
CommitLimit *prometheus.Desc
CommittedBytes *prometheus.Desc
DemandZeroFaultsTotal *prometheus.Desc
FreeAndZeroPageListBytes *prometheus.Desc
FreeSystemPageTableEntries *prometheus.Desc
ModifiedPageListBytes *prometheus.Desc
PageFaultsTotal *prometheus.Desc
SwapPageReadsTotal *prometheus.Desc
SwapPagesReadTotal *prometheus.Desc
SwapPagesWrittenTotal *prometheus.Desc
SwapPageOperationsTotal *prometheus.Desc
SwapPageWritesTotal *prometheus.Desc
PoolNonpagedAllocsTotal *prometheus.Desc
PoolNonpagedBytes *prometheus.Desc
PoolPagedAllocsTotal *prometheus.Desc
PoolPagedBytes *prometheus.Desc
PoolPagedResidentBytes *prometheus.Desc
StandbyCacheCoreBytes *prometheus.Desc
StandbyCacheNormalPriorityBytes *prometheus.Desc
StandbyCacheReserveBytes *prometheus.Desc
SystemCacheResidentBytes *prometheus.Desc
SystemCodeResidentBytes *prometheus.Desc
SystemCodeTotalBytes *prometheus.Desc
SystemDriverResidentBytes *prometheus.Desc
SystemDriverTotalBytes *prometheus.Desc
TransitionFaultsTotal *prometheus.Desc
TransitionPagesRepurposedTotal *prometheus.Desc
WriteCopiesTotal *prometheus.Desc
availableBytes *prometheus.Desc
cacheBytes *prometheus.Desc
cacheBytesPeak *prometheus.Desc
cacheFaultsTotal *prometheus.Desc
commitLimit *prometheus.Desc
committedBytes *prometheus.Desc
demandZeroFaultsTotal *prometheus.Desc
freeAndZeroPageListBytes *prometheus.Desc
freeSystemPageTableEntries *prometheus.Desc
modifiedPageListBytes *prometheus.Desc
pageFaultsTotal *prometheus.Desc
swapPageReadsTotal *prometheus.Desc
swapPagesReadTotal *prometheus.Desc
swapPagesWrittenTotal *prometheus.Desc
swapPageOperationsTotal *prometheus.Desc
swapPageWritesTotal *prometheus.Desc
poolNonPagedAllocationsTotal *prometheus.Desc
poolNonPagedBytes *prometheus.Desc
poolPagedAllocationsTotal *prometheus.Desc
poolPagedBytes *prometheus.Desc
poolPagedResidentBytes *prometheus.Desc
standbyCacheCoreBytes *prometheus.Desc
standbyCacheNormalPriorityBytes *prometheus.Desc
standbyCacheReserveBytes *prometheus.Desc
systemCacheResidentBytes *prometheus.Desc
systemCodeResidentBytes *prometheus.Desc
systemCodeTotalBytes *prometheus.Desc
systemDriverResidentBytes *prometheus.Desc
systemDriverTotalBytes *prometheus.Desc
transitionFaultsTotal *prometheus.Desc
transitionPagesRepurposedTotal *prometheus.Desc
writeCopiesTotal *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
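The constructor pair above gives two entry points: New for programmatic or YAML-driven use, where a nil config now falls back to ConfigDefaults, and NewWithFlags for the CLI path. A short usage sketch, assuming the import path follows the pkg/collector layout visible in the import hunks above:

package main

import (
	"github.com/alecthomas/kingpin/v2"
	"github.com/go-kit/log"

	"github.com/prometheus-community/windows_exporter/pkg/collector/memory"
)

func main() {
	logger := log.NewNopLogger()

	// YAML / programmatic path: nil falls back to ConfigDefaults inside New.
	c1 := memory.New(logger, nil)

	// CLI path: flags (none for this collector) are registered on the kingpin app.
	app := kingpin.New("windows_exporter", "sketch")
	c2 := memory.NewWithFlags(app)
	c2.SetLogger(logger)

	// Build creates the prometheus.Desc instances before the first scrape.
	if err := c1.Build(); err != nil {
		panic(err)
	}
	_ = c2
}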
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil
}
func (c *collector) Build() error {
c.AvailableBytes = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.availableBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "available_bytes"),
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
" the standby (cached), free and zero page lists (AvailableBytes)",
nil,
nil,
)
c.CacheBytes = prometheus.NewDesc(
c.cacheBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cache_bytes"),
"(CacheBytes)",
nil,
nil,
)
c.CacheBytesPeak = prometheus.NewDesc(
c.cacheBytesPeak = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cache_bytes_peak"),
"(CacheBytesPeak)",
nil,
nil,
)
c.CacheFaultsTotal = prometheus.NewDesc(
c.cacheFaultsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cache_faults_total"),
"Number of faults which occur when a page sought in the file system cache is not found there and must be retrieved from elsewhere in memory (soft fault) "+
"or from disk (hard fault) (Cache Faults/sec)",
nil,
nil,
)
c.CommitLimit = prometheus.NewDesc(
c.commitLimit = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "commit_limit"),
"(CommitLimit)",
nil,
nil,
)
c.CommittedBytes = prometheus.NewDesc(
c.committedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "committed_bytes"),
"(CommittedBytes)",
nil,
nil,
)
c.DemandZeroFaultsTotal = prometheus.NewDesc(
c.demandZeroFaultsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "demand_zero_faults_total"),
"The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security"+
" feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space (Demand Zero Faults/sec)",
nil,
nil,
)
c.FreeAndZeroPageListBytes = prometheus.NewDesc(
c.freeAndZeroPageListBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "free_and_zero_page_list_bytes"),
"The amount of physical memory, in bytes, that is assigned to the free and zero page lists. This memory does not contain cached data. It is immediately"+
" available for allocation to a process or for system use (FreeAndZeroPageListBytes)",
nil,
nil,
)
c.FreeSystemPageTableEntries = prometheus.NewDesc(
c.freeSystemPageTableEntries = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "free_system_page_table_entries"),
"(FreeSystemPageTableEntries)",
nil,
nil,
)
c.ModifiedPageListBytes = prometheus.NewDesc(
c.modifiedPageListBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "modified_page_list_bytes"),
"The amount of physical memory, in bytes, that is assigned to the modified page list. This memory contains cached data and code that is not actively in "+
"use by processes, the system and the system cache (ModifiedPageListBytes)",
nil,
nil,
)
c.PageFaultsTotal = prometheus.NewDesc(
c.pageFaultsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_faults_total"),
"Overall rate at which faulted pages are handled by the processor (Page Faults/sec)",
nil,
nil,
)
c.SwapPageReadsTotal = prometheus.NewDesc(
c.swapPageReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_page_reads_total"),
"Number of disk page reads (a single read operation reading several pages is still only counted once) (PageReadsPersec)",
nil,
nil,
)
c.SwapPagesReadTotal = prometheus.NewDesc(
c.swapPagesReadTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_pages_read_total"),
"Number of pages read across all page reads (ie counting all pages read even if they are read in a single operation) (PagesInputPersec)",
nil,
nil,
)
c.SwapPagesWrittenTotal = prometheus.NewDesc(
c.swapPagesWrittenTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_pages_written_total"),
"Number of pages written across all page writes (ie counting all pages written even if they are written in a single operation) (PagesOutputPersec)",
nil,
nil,
)
c.SwapPageOperationsTotal = prometheus.NewDesc(
c.swapPageOperationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_page_operations_total"),
"Total number of swap page read and writes (PagesPersec)",
nil,
nil,
)
c.SwapPageWritesTotal = prometheus.NewDesc(
c.swapPageWritesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "swap_page_writes_total"),
"Number of disk page writes (a single write operation writing several pages is still only counted once) (PageWritesPersec)",
nil,
nil,
)
c.PoolNonpagedAllocsTotal = prometheus.NewDesc(
c.poolNonPagedAllocationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_nonpaged_allocs_total"),
"The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory area for objects that cannot be written"+
" to disk, and must remain in physical memory as long as they are allocated (PoolNonpagedAllocs)",
nil,
nil,
)
c.PoolNonpagedBytes = prometheus.NewDesc(
c.poolNonPagedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_nonpaged_bytes"),
"Number of bytes in the non-paged pool, an area of the system virtual memory that is used for objects that cannot be written to disk, but must "+
"remain in physical memory as long as they are allocated (PoolNonpagedBytes)",
nil,
nil,
)
c.PoolPagedAllocsTotal = prometheus.NewDesc(
c.poolPagedAllocationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_paged_allocs_total"),
"Number of calls to allocate space in the paged pool, regardless of the amount of space allocated in each call (PoolPagedAllocs)",
nil,
nil,
)
c.PoolPagedBytes = prometheus.NewDesc(
c.poolPagedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_paged_bytes"),
"(PoolPagedBytes)",
nil,
nil,
)
c.PoolPagedResidentBytes = prometheus.NewDesc(
c.poolPagedResidentBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_paged_resident_bytes"),
"The size, in bytes, of the portion of the paged pool that is currently resident and active in physical memory. The paged pool is an area of the "+
"system virtual memory that is used for objects that can be written to disk when they are not being used (PoolPagedResidentBytes)",
nil,
nil,
)
c.StandbyCacheCoreBytes = prometheus.NewDesc(
c.standbyCacheCoreBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "standby_cache_core_bytes"),
"The amount of physical memory, in bytes, that is assigned to the core standby cache page lists. This memory contains cached data and code that is "+
"not actively in use by processes, the system and the system cache (StandbyCacheCoreBytes)",
nil,
nil,
)
c.StandbyCacheNormalPriorityBytes = prometheus.NewDesc(
c.standbyCacheNormalPriorityBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "standby_cache_normal_priority_bytes"),
"The amount of physical memory, in bytes, that is assigned to the normal priority standby cache page lists. This memory contains cached data and "+
"code that is not actively in use by processes, the system and the system cache (StandbyCacheNormalPriorityBytes)",
nil,
nil,
)
c.StandbyCacheReserveBytes = prometheus.NewDesc(
c.standbyCacheReserveBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "standby_cache_reserve_bytes"),
"The amount of physical memory, in bytes, that is assigned to the reserve standby cache page lists. This memory contains cached data and code "+
"that is not actively in use by processes, the system and the system cache (StandbyCacheReserveBytes)",
nil,
nil,
)
c.SystemCacheResidentBytes = prometheus.NewDesc(
c.systemCacheResidentBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "system_cache_resident_bytes"),
"The size, in bytes, of the portion of the system file cache which is currently resident and active in physical memory (SystemCacheResidentBytes)",
nil,
nil,
)
c.SystemCodeResidentBytes = prometheus.NewDesc(
c.systemCodeResidentBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "system_code_resident_bytes"),
"The size, in bytes, of the pageable operating system code that is currently resident and active in physical memory (SystemCodeResidentBytes)",
nil,
nil,
)
c.SystemCodeTotalBytes = prometheus.NewDesc(
c.systemCodeTotalBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "system_code_total_bytes"),
"The size, in bytes, of the pageable operating system code currently mapped into the system virtual address space (SystemCodeTotalBytes)",
nil,
nil,
)
c.SystemDriverResidentBytes = prometheus.NewDesc(
c.systemDriverResidentBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "system_driver_resident_bytes"),
"The size, in bytes, of the pageable physical memory being used by device drivers. It is the working set (physical memory area) of the drivers (SystemDriverResidentBytes)",
nil,
nil,
)
c.SystemDriverTotalBytes = prometheus.NewDesc(
c.systemDriverTotalBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "system_driver_total_bytes"),
"The size, in bytes, of the pageable virtual memory currently being used by device drivers. Pageable memory can be written to disk when it is not being used (SystemDriverTotalBytes)",
nil,
nil,
)
c.TransitionFaultsTotal = prometheus.NewDesc(
c.transitionFaultsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transition_faults_total"),
"Number of faults rate at which page faults are resolved by recovering pages that were being used by another process sharing the page, or were on the "+
"modified page list or the standby list, or were being written to disk at the time of the page fault (TransitionFaultsPersec)",
nil,
nil,
)
c.TransitionPagesRepurposedTotal = prometheus.NewDesc(
c.transitionPagesRepurposedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transition_pages_repurposed_total"),
"Transition Pages RePurposed is the rate at which the number of transition cache pages were reused for a different purpose (TransitionPagesRePurposedPersec)",
nil,
nil,
)
c.WriteCopiesTotal = prometheus.NewDesc(
c.writeCopiesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_copies_total"),
"The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory (WriteCopiesPersec)",
nil,
@@ -290,7 +303,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting memory metrics", "err", err)
return err
@@ -335,200 +348,200 @@ type memory struct {
WriteCopiesPersec float64 `perflib:"Write Copies/sec"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []memory
if err := perflib.UnmarshalObject(ctx.PerfObjects["Memory"], &dst, c.logger); err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.AvailableBytes,
c.availableBytes,
prometheus.GaugeValue,
dst[0].AvailableBytes,
)
ch <- prometheus.MustNewConstMetric(
c.CacheBytes,
c.cacheBytes,
prometheus.GaugeValue,
dst[0].CacheBytes,
)
ch <- prometheus.MustNewConstMetric(
c.CacheBytesPeak,
c.cacheBytesPeak,
prometheus.GaugeValue,
dst[0].CacheBytesPeak,
)
ch <- prometheus.MustNewConstMetric(
c.CacheFaultsTotal,
c.cacheFaultsTotal,
prometheus.CounterValue,
dst[0].CacheFaultsPersec,
)
ch <- prometheus.MustNewConstMetric(
c.CommitLimit,
c.commitLimit,
prometheus.GaugeValue,
dst[0].CommitLimit,
)
ch <- prometheus.MustNewConstMetric(
c.CommittedBytes,
c.committedBytes,
prometheus.GaugeValue,
dst[0].CommittedBytes,
)
ch <- prometheus.MustNewConstMetric(
c.DemandZeroFaultsTotal,
c.demandZeroFaultsTotal,
prometheus.CounterValue,
dst[0].DemandZeroFaultsPersec,
)
ch <- prometheus.MustNewConstMetric(
c.FreeAndZeroPageListBytes,
c.freeAndZeroPageListBytes,
prometheus.GaugeValue,
dst[0].FreeAndZeroPageListBytes,
)
ch <- prometheus.MustNewConstMetric(
c.FreeSystemPageTableEntries,
c.freeSystemPageTableEntries,
prometheus.GaugeValue,
dst[0].FreeSystemPageTableEntries,
)
ch <- prometheus.MustNewConstMetric(
c.ModifiedPageListBytes,
c.modifiedPageListBytes,
prometheus.GaugeValue,
dst[0].ModifiedPageListBytes,
)
ch <- prometheus.MustNewConstMetric(
c.PageFaultsTotal,
c.pageFaultsTotal,
prometheus.CounterValue,
dst[0].PageFaultsPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPageReadsTotal,
c.swapPageReadsTotal,
prometheus.CounterValue,
dst[0].PageReadsPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPagesReadTotal,
c.swapPagesReadTotal,
prometheus.CounterValue,
dst[0].PagesInputPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPagesWrittenTotal,
c.swapPagesWrittenTotal,
prometheus.CounterValue,
dst[0].PagesOutputPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPageOperationsTotal,
c.swapPageOperationsTotal,
prometheus.CounterValue,
dst[0].PagesPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SwapPageWritesTotal,
c.swapPageWritesTotal,
prometheus.CounterValue,
dst[0].PageWritesPersec,
)
ch <- prometheus.MustNewConstMetric(
c.PoolNonpagedAllocsTotal,
c.poolNonPagedAllocationsTotal,
prometheus.GaugeValue,
dst[0].PoolNonpagedAllocs,
)
ch <- prometheus.MustNewConstMetric(
c.PoolNonpagedBytes,
c.poolNonPagedBytes,
prometheus.GaugeValue,
dst[0].PoolNonpagedBytes,
)
ch <- prometheus.MustNewConstMetric(
c.PoolPagedAllocsTotal,
c.poolPagedAllocationsTotal,
prometheus.CounterValue,
dst[0].PoolPagedAllocs,
)
ch <- prometheus.MustNewConstMetric(
c.PoolPagedBytes,
c.poolPagedBytes,
prometheus.GaugeValue,
dst[0].PoolPagedBytes,
)
ch <- prometheus.MustNewConstMetric(
c.PoolPagedResidentBytes,
c.poolPagedResidentBytes,
prometheus.GaugeValue,
dst[0].PoolPagedResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.StandbyCacheCoreBytes,
c.standbyCacheCoreBytes,
prometheus.GaugeValue,
dst[0].StandbyCacheCoreBytes,
)
ch <- prometheus.MustNewConstMetric(
c.StandbyCacheNormalPriorityBytes,
c.standbyCacheNormalPriorityBytes,
prometheus.GaugeValue,
dst[0].StandbyCacheNormalPriorityBytes,
)
ch <- prometheus.MustNewConstMetric(
c.StandbyCacheReserveBytes,
c.standbyCacheReserveBytes,
prometheus.GaugeValue,
dst[0].StandbyCacheReserveBytes,
)
ch <- prometheus.MustNewConstMetric(
c.SystemCacheResidentBytes,
c.systemCacheResidentBytes,
prometheus.GaugeValue,
dst[0].SystemCacheResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.SystemCodeResidentBytes,
c.systemCodeResidentBytes,
prometheus.GaugeValue,
dst[0].SystemCodeResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.SystemCodeTotalBytes,
c.systemCodeTotalBytes,
prometheus.GaugeValue,
dst[0].SystemCodeTotalBytes,
)
ch <- prometheus.MustNewConstMetric(
c.SystemDriverResidentBytes,
c.systemDriverResidentBytes,
prometheus.GaugeValue,
dst[0].SystemDriverResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.SystemDriverTotalBytes,
c.systemDriverTotalBytes,
prometheus.GaugeValue,
dst[0].SystemDriverTotalBytes,
)
ch <- prometheus.MustNewConstMetric(
c.TransitionFaultsTotal,
c.transitionFaultsTotal,
prometheus.CounterValue,
dst[0].TransitionFaultsPersec,
)
ch <- prometheus.MustNewConstMetric(
c.TransitionPagesRepurposedTotal,
c.transitionPagesRepurposedTotal,
prometheus.CounterValue,
dst[0].TransitionPagesRePurposedPersec,
)
ch <- prometheus.MustNewConstMetric(
c.WriteCopiesTotal,
c.writeCopiesTotal,
prometheus.CounterValue,
dst[0].WriteCopiesPersec,
)
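Throughout the hunk above only the receiver fields were renamed from exported to unexported; the const-metric emission itself is unchanged: gauges for point-in-time values such as AvailableBytes, counters for cumulative perflib raw counts such as Page Faults/sec. A minimal, self-contained sketch of that emission pattern (the metric name mirrors what the namespace, collector name and suffix above produce; the value is made up):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Mirrors c.availableBytes in the hunk above.
	availableBytes := prometheus.NewDesc(
		"windows_memory_available_bytes",
		"Bytes of physical memory immediately available for allocation.",
		nil, nil,
	)

	// Const metrics are rebuilt on every scrape from the freshly read perflib values.
	m := prometheus.MustNewConstMetric(availableBytes, prometheus.GaugeValue, 123456789)

	var out dto.Metric
	if err := m.Write(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetGauge().GetValue())
}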

File diff suppressed because it is too large

View File

@@ -1,11 +1,10 @@
package mscluster_network
import (
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
@@ -15,65 +14,78 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI MSCluster_Network metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI MSCluster_Network metrics.
type Collector struct {
config Config
logger log.Logger
Characteristics *prometheus.Desc
Flags *prometheus.Desc
Metric *prometheus.Desc
Role *prometheus.Desc
State *prometheus.Desc
characteristics *prometheus.Desc
flags *prometheus.Desc
metric *prometheus.Desc
role *prometheus.Desc
state *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil
}
func (c *collector) Build() error {
c.Characteristics = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.characteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "characteristics"),
"Provides the characteristics of the network.",
[]string{"name"},
nil,
)
c.Flags = prometheus.NewDesc(
c.flags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "flags"),
"Provides access to the flags set for the node. ",
[]string{"name"},
nil,
)
c.Metric = prometheus.NewDesc(
c.metric = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "metric"),
"The metric of a cluster network (networks with lower values are used first). If this value is set, then the AutoMetric property is set to false.",
[]string{"name"},
nil,
)
c.Role = prometheus.NewDesc(
c.role = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "role"),
"Provides access to the network's Role property. The Role property describes the role of the network in the cluster. 0: None; 1: Cluster; 2: Client; 3: Both ",
[]string{"name"},
nil,
)
c.State = prometheus.NewDesc(
c.state = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"Provides the current state of the network. 1-1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up",
[]string{"name"},
@@ -95,8 +107,8 @@ type MSCluster_Network struct {
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
// to the provided prometheus metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Network
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
@@ -105,35 +117,35 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.Characteristics,
c.characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Flags,
c.flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Metric,
c.metric,
prometheus.GaugeValue,
float64(v.Metric),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Role,
c.role,
prometheus.GaugeValue,
float64(v.Role),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.State,
c.state,
prometheus.GaugeValue,
float64(v.State),
v.Name,
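Each MSCluster collector follows the same retrieval pattern: build a SELECT query from the destination struct, run it against the root/MSCluster WMI namespace, then emit one sample per row. A standalone sketch of that pattern, assuming the yusufpapurcu/wmi package (the exporter's pkg/wmi helpers wrap a comparable API; the field list below is trimmed for illustration):

//go:build windows

package main

import (
	"fmt"

	"github.com/yusufpapurcu/wmi"
)

// MSCluster_Network mirrors a subset of the fields queried in the hunk above.
type MSCluster_Network struct {
	Name            string
	Characteristics uint
	Flags           uint
	Metric          uint
	Role            uint
	State           uint
}

func main() {
	var dst []MSCluster_Network
	// CreateQuery builds "SELECT Name, Characteristics, ... FROM MSCluster_Network",
	// analogous to wmi.QueryAll in the exporter.
	q := wmi.CreateQuery(&dst, "")
	if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
		panic(err)
	}
	for _, v := range dst {
		fmt.Printf("%s state=%d role=%d\n", v.Name, v.State, v.Role)
	}
}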

View File

@@ -1,11 +1,10 @@
package mscluster_node
import (
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
@@ -15,131 +14,144 @@ type Config struct{}
var ConfigDefaults = Config{}
// Variable used by mscluster_resource and mscluster_resourcegroup
// Variable used by mscluster_resource and mscluster_resourcegroup.
var NodeName []string
// A collector is a Prometheus collector for WMI MSCluster_Node metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI MSCluster_Node metrics.
type Collector struct {
config Config
logger log.Logger
BuildNumber *prometheus.Desc
Characteristics *prometheus.Desc
DetectedCloudPlatform *prometheus.Desc
DynamicWeight *prometheus.Desc
Flags *prometheus.Desc
MajorVersion *prometheus.Desc
MinorVersion *prometheus.Desc
NeedsPreventQuorum *prometheus.Desc
NodeDrainStatus *prometheus.Desc
NodeHighestVersion *prometheus.Desc
NodeLowestVersion *prometheus.Desc
NodeWeight *prometheus.Desc
State *prometheus.Desc
StatusInformation *prometheus.Desc
buildNumber *prometheus.Desc
characteristics *prometheus.Desc
detectedCloudPlatform *prometheus.Desc
dynamicWeight *prometheus.Desc
flags *prometheus.Desc
majorVersion *prometheus.Desc
minorVersion *prometheus.Desc
needsPreventQuorum *prometheus.Desc
nodeDrainStatus *prometheus.Desc
nodeHighestVersion *prometheus.Desc
nodeLowestVersion *prometheus.Desc
nodeWeight *prometheus.Desc
state *prometheus.Desc
statusInformation *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil
}
func (c *collector) Build() error {
c.BuildNumber = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.buildNumber = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "build_number"),
"Provides access to the node's BuildNumber property.",
[]string{"name"},
nil,
)
c.Characteristics = prometheus.NewDesc(
c.characteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "characteristics"),
"Provides access to the characteristics set for the node.",
[]string{"name"},
nil,
)
c.DetectedCloudPlatform = prometheus.NewDesc(
c.detectedCloudPlatform = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "detected_cloud_platform"),
"(DetectedCloudPlatform)",
[]string{"name"},
nil,
)
c.DynamicWeight = prometheus.NewDesc(
c.dynamicWeight = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dynamic_weight"),
"The dynamic vote weight of the node adjusted by dynamic quorum feature.",
[]string{"name"},
nil,
)
c.Flags = prometheus.NewDesc(
c.flags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "flags"),
"Provides access to the flags set for the node.",
[]string{"name"},
nil,
)
c.MajorVersion = prometheus.NewDesc(
c.majorVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "major_version"),
"Provides access to the node's MajorVersion property, which specifies the major portion of the Windows version installed.",
[]string{"name"},
nil,
)
c.MinorVersion = prometheus.NewDesc(
c.minorVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "minor_version"),
"Provides access to the node's MinorVersion property, which specifies the minor portion of the Windows version installed.",
[]string{"name"},
nil,
)
c.NeedsPreventQuorum = prometheus.NewDesc(
c.needsPreventQuorum = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "needs_prevent_quorum"),
"Whether the cluster service on that node should be started with prevent quorum flag.",
[]string{"name"},
nil,
)
c.NodeDrainStatus = prometheus.NewDesc(
c.nodeDrainStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "node_drain_status"),
"The current node drain status of a node. 0: Not Initiated; 1: In Progress; 2: Completed; 3: Failed",
[]string{"name"},
nil,
)
c.NodeHighestVersion = prometheus.NewDesc(
c.nodeHighestVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "node_highest_version"),
"Provides access to the node's NodeHighestVersion property, which specifies the highest possible version of the cluster service with which the node can join or communicate.",
[]string{"name"},
nil,
)
c.NodeLowestVersion = prometheus.NewDesc(
c.nodeLowestVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "node_lowest_version"),
"Provides access to the node's NodeLowestVersion property, which specifies the lowest possible version of the cluster service with which the node can join or communicate.",
[]string{"name"},
nil,
)
c.NodeWeight = prometheus.NewDesc(
c.nodeWeight = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "node_weight"),
"The vote weight of the node.",
[]string{"name"},
nil,
)
c.State = prometheus.NewDesc(
c.state = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"Returns the current state of a node. -1: Unknown; 0: Up; 1: Down; 2: Paused; 3: Joining",
[]string{"name"},
nil,
)
c.StatusInformation = prometheus.NewDesc(
c.statusInformation = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "status_information"),
"The isolation or quarantine status of the node.",
[]string{"name"},
@@ -171,7 +183,7 @@ type MSCluster_Node struct {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Node
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
@@ -181,100 +193,99 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
NodeName = []string{}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.BuildNumber,
c.buildNumber,
prometheus.GaugeValue,
float64(v.BuildNumber),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Characteristics,
c.characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DetectedCloudPlatform,
c.detectedCloudPlatform,
prometheus.GaugeValue,
float64(v.DetectedCloudPlatform),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DynamicWeight,
c.dynamicWeight,
prometheus.GaugeValue,
float64(v.DynamicWeight),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Flags,
c.flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MajorVersion,
c.majorVersion,
prometheus.GaugeValue,
float64(v.MajorVersion),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MinorVersion,
c.minorVersion,
prometheus.GaugeValue,
float64(v.MinorVersion),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NeedsPreventQuorum,
c.needsPreventQuorum,
prometheus.GaugeValue,
float64(v.NeedsPreventQuorum),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NodeDrainStatus,
c.nodeDrainStatus,
prometheus.GaugeValue,
float64(v.NodeDrainStatus),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NodeHighestVersion,
c.nodeHighestVersion,
prometheus.GaugeValue,
float64(v.NodeHighestVersion),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NodeLowestVersion,
c.nodeLowestVersion,
prometheus.GaugeValue,
float64(v.NodeLowestVersion),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NodeWeight,
c.nodeWeight,
prometheus.GaugeValue,
float64(v.NodeWeight),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.State,
c.state,
prometheus.GaugeValue,
float64(v.State),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.StatusInformation,
c.statusInformation,
prometheus.GaugeValue,
float64(v.StatusInformation),
v.Name,

View File

@@ -1,12 +1,11 @@
package mscluster_resource
import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_node"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@@ -16,155 +15,168 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI MSCluster_Resource metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI MSCluster_Resource metrics.
type Collector struct {
config Config
logger log.Logger
Characteristics *prometheus.Desc
DeadlockTimeout *prometheus.Desc
EmbeddedFailureAction *prometheus.Desc
Flags *prometheus.Desc
IsAlivePollInterval *prometheus.Desc
LooksAlivePollInterval *prometheus.Desc
MonitorProcessId *prometheus.Desc
OwnerNode *prometheus.Desc
PendingTimeout *prometheus.Desc
ResourceClass *prometheus.Desc
RestartAction *prometheus.Desc
RestartDelay *prometheus.Desc
RestartPeriod *prometheus.Desc
RestartThreshold *prometheus.Desc
RetryPeriodOnFailure *prometheus.Desc
State *prometheus.Desc
Subclass *prometheus.Desc
characteristics *prometheus.Desc
deadlockTimeout *prometheus.Desc
embeddedFailureAction *prometheus.Desc
flags *prometheus.Desc
isAlivePollInterval *prometheus.Desc
looksAlivePollInterval *prometheus.Desc
monitorProcessId *prometheus.Desc
ownerNode *prometheus.Desc
pendingTimeout *prometheus.Desc
resourceClass *prometheus.Desc
restartAction *prometheus.Desc
restartDelay *prometheus.Desc
restartPeriod *prometheus.Desc
restartThreshold *prometheus.Desc
retryPeriodOnFailure *prometheus.Desc
state *prometheus.Desc
subclass *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil
}
func (c *collector) Build() error {
c.Characteristics = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.characteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "characteristics"),
"Provides the characteristics of the object.",
[]string{"type", "owner_group", "name"},
nil,
)
c.DeadlockTimeout = prometheus.NewDesc(
c.deadlockTimeout = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "deadlock_timeout"),
"Indicates the length of time to wait, in milliseconds, before declaring a deadlock in any call into a resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.EmbeddedFailureAction = prometheus.NewDesc(
c.embeddedFailureAction = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "embedded_failure_action"),
"The time, in milliseconds, that a resource should remain in a failed state before the Cluster service attempts to restart it.",
[]string{"type", "owner_group", "name"},
nil,
)
c.Flags = prometheus.NewDesc(
c.flags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "flags"),
"Provides access to the flags set for the object.",
[]string{"type", "owner_group", "name"},
nil,
)
c.IsAlivePollInterval = prometheus.NewDesc(
c.isAlivePollInterval = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "is_alive_poll_interval"),
"Provides access to the resource's IsAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it is operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the IsAlivePollInterval property for the resource type associated with the resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.LooksAlivePollInterval = prometheus.NewDesc(
c.looksAlivePollInterval = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "looks_alive_poll_interval"),
"Provides access to the resource's LooksAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it appears operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the LooksAlivePollInterval property for the resource type associated with the resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.MonitorProcessId = prometheus.NewDesc(
c.monitorProcessId = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "monitor_process_id"),
"Provides the process ID of the resource host service that is currently hosting the resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.OwnerNode = prometheus.NewDesc(
c.ownerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "owner_node"),
"The node hosting the resource. 0: Not hosted; 1: Hosted",
[]string{"type", "owner_group", "node_name", "name"},
nil,
)
c.OwnerNode = prometheus.NewDesc(
c.ownerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "owner_node"),
"The node hosting the resource. 0: Not hosted; 1: Hosted",
[]string{"type", "owner_group", "node_name", "name"},
nil,
)
c.PendingTimeout = prometheus.NewDesc(
c.pendingTimeout = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pending_timeout"),
"Provides access to the resource's PendingTimeout property. If a resource cannot be brought online or taken offline in the number of milliseconds specified by the PendingTimeout property, the resource is forcibly terminated.",
[]string{"type", "owner_group", "name"},
nil,
)
c.ResourceClass = prometheus.NewDesc(
c.resourceClass = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "resource_class"),
"Gets or sets the resource class of a resource. 0: Unknown; 1: Storage; 2: Network; 32768: Unknown ",
[]string{"type", "owner_group", "name"},
nil,
)
c.RestartAction = prometheus.NewDesc(
c.restartAction = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "restart_action"),
"Provides access to the resource's RestartAction property, which is the action to be taken by the Cluster Service if the resource fails.",
[]string{"type", "owner_group", "name"},
nil,
)
c.RestartDelay = prometheus.NewDesc(
c.restartDelay = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "restart_delay"),
"Indicates the time delay before a failed resource is restarted.",
[]string{"type", "owner_group", "name"},
nil,
)
c.RestartPeriod = prometheus.NewDesc(
c.restartPeriod = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "restart_period"),
"Provides access to the resource's RestartPeriod property, which is interval of time, in milliseconds, during which a specified number of restart attempts can be made on a nonresponsive resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.RestartThreshold = prometheus.NewDesc(
c.restartThreshold = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "restart_threshold"),
"Provides access to the resource's RestartThreshold property which is the maximum number of restart attempts that can be made on a resource within an interval defined by the RestartPeriod property before the Cluster Service initiates the action specified by the RestartAction property.",
[]string{"type", "owner_group", "name"},
nil,
)
c.RetryPeriodOnFailure = prometheus.NewDesc(
c.retryPeriodOnFailure = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "retry_period_on_failure"),
"Provides access to the resource's RetryPeriodOnFailure property, which is the interval of time (in milliseconds) that a resource should remain in a failed state before the Cluster service attempts to restart it.",
[]string{"type", "owner_group", "name"},
nil,
)
c.State = prometheus.NewDesc(
c.state = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"The current state of the resource. -1: Unknown; 0: Inherited; 1: Initializing; 2: Online; 3: Offline; 4: Failed; 128: Pending; 129: Online Pending; 130: Offline Pending ",
[]string{"type", "owner_group", "name"},
nil,
)
c.Subclass = prometheus.NewDesc(
c.subclass = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "subclass"),
"Provides the list of references to nodes that can be the owner of this resource.",
[]string{"type", "owner_group", "name"},
@@ -201,7 +213,7 @@ type MSCluster_Resource struct {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Resource
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
@@ -209,51 +221,50 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.Characteristics,
c.characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DeadlockTimeout,
c.deadlockTimeout,
prometheus.GaugeValue,
float64(v.DeadlockTimeout),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.EmbeddedFailureAction,
c.embeddedFailureAction,
prometheus.GaugeValue,
float64(v.EmbeddedFailureAction),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Flags,
c.flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.IsAlivePollInterval,
c.isAlivePollInterval,
prometheus.GaugeValue,
float64(v.IsAlivePollInterval),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.LooksAlivePollInterval,
c.looksAlivePollInterval,
prometheus.GaugeValue,
float64(v.LooksAlivePollInterval),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MonitorProcessId,
c.monitorProcessId,
prometheus.GaugeValue,
float64(v.MonitorProcessId),
v.Type, v.OwnerGroup, v.Name,
@@ -266,7 +277,7 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.OwnerNode,
c.ownerNode,
prometheus.GaugeValue,
isCurrentState,
v.Type, v.OwnerGroup, node_name, v.Name,
@@ -275,63 +286,63 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
}
ch <- prometheus.MustNewConstMetric(
c.PendingTimeout,
c.pendingTimeout,
prometheus.GaugeValue,
float64(v.PendingTimeout),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ResourceClass,
c.resourceClass,
prometheus.GaugeValue,
float64(v.ResourceClass),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RestartAction,
c.restartAction,
prometheus.GaugeValue,
float64(v.RestartAction),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RestartDelay,
c.restartDelay,
prometheus.GaugeValue,
float64(v.RestartDelay),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RestartPeriod,
c.restartPeriod,
prometheus.GaugeValue,
float64(v.RestartPeriod),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RestartThreshold,
c.restartThreshold,
prometheus.GaugeValue,
float64(v.RestartThreshold),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RetryPeriodOnFailure,
c.retryPeriodOnFailure,
prometheus.GaugeValue,
float64(v.RetryPeriodOnFailure),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.State,
c.state,
prometheus.GaugeValue,
float64(v.State),
v.Type, v.OwnerGroup, v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Subclass,
c.subclass,
prometheus.GaugeValue,
float64(v.Subclass),
v.Type, v.OwnerGroup, v.Name,
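The owner_node metric in the hunk above is fanned out over every node name gathered by the mscluster_node collector, with value 1 for the node that currently hosts the resource and 0 for all others. A tiny sketch of that fan-out, with made-up node names:

package main

import "fmt"

func main() {
	// Stand-in for the package-level NodeName slice filled by the mscluster_node collector.
	nodeNames := []string{"node-a", "node-b", "node-c"}
	ownerNode := "node-b" // v.OwnerNode of one MSCluster_Resource row

	// One 0/1 sample per node keeps the label set stable while marking the current owner.
	for _, nodeName := range nodeNames {
		isCurrentState := 0.0
		if nodeName == ownerNode {
			isCurrentState = 1.0
		}
		fmt.Printf("windows_mscluster_resource_owner_node{node_name=%q} %v\n", nodeName, isCurrentState)
	}
}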

View File

@@ -1,12 +1,11 @@
package mscluster_resourcegroup
import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_node"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@@ -16,136 +15,147 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI MSCluster_ResourceGroup metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI MSCluster_ResourceGroup metrics.
type Collector struct {
config Config
logger log.Logger
AutoFailbackType *prometheus.Desc
Characteristics *prometheus.Desc
ColdStartSetting *prometheus.Desc
DefaultOwner *prometheus.Desc
FailbackWindowEnd *prometheus.Desc
FailbackWindowStart *prometheus.Desc
FailoverPeriod *prometheus.Desc
FailoverThreshold *prometheus.Desc
FaultDomain *prometheus.Desc
Flags *prometheus.Desc
GroupType *prometheus.Desc
PlacementOptions *prometheus.Desc
OwnerNode *prometheus.Desc
Priority *prometheus.Desc
ResiliencyPeriod *prometheus.Desc
State *prometheus.Desc
autoFailbackType *prometheus.Desc
characteristics *prometheus.Desc
coldStartSetting *prometheus.Desc
defaultOwner *prometheus.Desc
failbackWindowEnd *prometheus.Desc
failbackWindowStart *prometheus.Desc
failOverPeriod *prometheus.Desc
failOverThreshold *prometheus.Desc
flags *prometheus.Desc
groupType *prometheus.Desc
ownerNode *prometheus.Desc
priority *prometheus.Desc
resiliencyPeriod *prometheus.Desc
state *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil
}
func (c *collector) Build() error {
c.AutoFailbackType = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.autoFailbackType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "auto_failback_type"),
"Provides access to the group's AutoFailbackType property.",
[]string{"name"},
nil,
)
c.Characteristics = prometheus.NewDesc(
c.characteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "characteristics"),
"Provides the characteristics of the group.",
[]string{"name"},
nil,
)
c.ColdStartSetting = prometheus.NewDesc(
c.coldStartSetting = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cold_start_setting"),
"Indicates whether a group can start after a cluster cold start.",
[]string{"name"},
nil,
)
c.DefaultOwner = prometheus.NewDesc(
c.defaultOwner = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "default_owner"),
"Number of the last node the resource group was activated on or explicitly moved to.",
[]string{"name"},
nil,
)
c.FailbackWindowEnd = prometheus.NewDesc(
c.failbackWindowEnd = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failback_window_end"),
"The FailbackWindowEnd property provides the latest time that the group can be moved back to the node identified as its preferred node.",
[]string{"name"},
nil,
)
c.FailbackWindowStart = prometheus.NewDesc(
c.failbackWindowStart = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failback_window_start"),
"The FailbackWindowStart property provides the earliest time (that is, local time as kept by the cluster) that the group can be moved back to the node identified as its preferred node.",
[]string{"name"},
nil,
)
c.FailoverPeriod = prometheus.NewDesc(
c.failOverPeriod = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_period"),
"The FailoverPeriod property specifies a number of hours during which a maximum number of failover attempts, specified by the FailoverThreshold property, can occur.",
[]string{"name"},
nil,
)
c.FailoverThreshold = prometheus.NewDesc(
c.failOverThreshold = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_threshold"),
"The FailoverThreshold property specifies the maximum number of failover attempts.",
[]string{"name"},
nil,
)
c.Flags = prometheus.NewDesc(
c.flags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "flags"),
"Provides access to the flags set for the group. ",
[]string{"name"},
nil,
)
c.GroupType = prometheus.NewDesc(
c.groupType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "group_type"),
"The Type of the resource group.",
[]string{"name"},
nil,
)
c.OwnerNode = prometheus.NewDesc(
c.ownerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "owner_node"),
"The node hosting the resource group. 0: Not hosted; 1: Hosted",
[]string{"node_name", "name"},
nil,
)
c.OwnerNode = prometheus.NewDesc(
c.ownerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "owner_node"),
"The node hosting the resource group. 0: Not hosted; 1: Hosted",
[]string{"node_name", "name"},
nil,
)
c.Priority = prometheus.NewDesc(
c.priority = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "priority"),
"Priority value of the resource group",
[]string{"name"},
nil,
)
c.ResiliencyPeriod = prometheus.NewDesc(
c.resiliencyPeriod = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "resiliency_period"),
"The resiliency period for this group, in seconds.",
[]string{"name"},
nil,
)
c.State = prometheus.NewDesc(
c.state = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending",
[]string{"name"},
@@ -177,7 +187,7 @@ type MSCluster_ResourceGroup struct {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_ResourceGroup
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
@@ -185,72 +195,71 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.AutoFailbackType,
c.autoFailbackType,
prometheus.GaugeValue,
float64(v.AutoFailbackType),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Characteristics,
c.characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ColdStartSetting,
c.coldStartSetting,
prometheus.GaugeValue,
float64(v.ColdStartSetting),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DefaultOwner,
c.defaultOwner,
prometheus.GaugeValue,
float64(v.DefaultOwner),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailbackWindowEnd,
c.failbackWindowEnd,
prometheus.GaugeValue,
float64(v.FailbackWindowEnd),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailbackWindowStart,
c.failbackWindowStart,
prometheus.GaugeValue,
float64(v.FailbackWindowStart),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverPeriod,
c.failOverPeriod,
prometheus.GaugeValue,
float64(v.FailoverPeriod),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverThreshold,
c.failOverThreshold,
prometheus.GaugeValue,
float64(v.FailoverThreshold),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Flags,
c.flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.GroupType,
c.groupType,
prometheus.GaugeValue,
float64(v.GroupType),
v.Name,
@@ -263,7 +272,7 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.OwnerNode,
c.ownerNode,
prometheus.GaugeValue,
isCurrentState,
node_name, v.Name,
@@ -272,26 +281,25 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
}
ch <- prometheus.MustNewConstMetric(
c.Priority,
c.priority,
prometheus.GaugeValue,
float64(v.Priority),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ResiliencyPeriod,
c.resiliencyPeriod,
prometheus.GaugeValue,
float64(v.ResiliencyPeriod),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.State,
c.state,
prometheus.GaugeValue,
float64(v.State),
v.Name,
)
}
return nil
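As an aside on the state gauge above: the numeric codes come straight from the MSCluster WMI class, exactly as listed in the metric help text. A small reader-side sketch (not part of the diff; the variable name is invented) mapping them back to names in Go:

// Hedged helper sketch: maps windows_mscluster_resourcegroup_state values
// back to the names given in the metric description above.
var resourceGroupStateNames = map[int]string{
	-1: "Unknown",
	0:  "Online",
	1:  "Offline",
	2:  "Failed",
	3:  "Partial Online",
	4:  "Pending",
}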

View File

@@ -14,87 +14,97 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "msmq"
FlagMsmqWhereClause = "collector.msmq.msmq-where"
)
const Name = "msmq"
type Config struct {
QueryWhereClause string `yaml:"query_where_clause"`
QueryWhereClause *string `yaml:"query_where_clause"`
}
var ConfigDefaults = Config{
QueryWhereClause: "",
QueryWhereClause: utils.ToPTR(""),
}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics.
type Collector struct {
config Config
logger log.Logger
queryWhereClause *string
BytesinJournalQueue *prometheus.Desc
BytesinQueue *prometheus.Desc
MessagesinJournalQueue *prometheus.Desc
MessagesinQueue *prometheus.Desc
bytesInJournalQueue *prometheus.Desc
bytesInQueue *prometheus.Desc
messagesInJournalQueue *prometheus.Desc
messagesInQueue *prometheus.Desc
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
queryWhereClause: &config.QueryWhereClause,
if config.QueryWhereClause == nil {
config.QueryWhereClause = ConfigDefaults.QueryWhereClause
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
return &collector{
queryWhereClause: app.
Flag(FlagMsmqWhereClause, "WQL 'where' clause to use in WMI metrics query. Limits the response to the msmqs you specify and reduces the size of the response.").
Default(ConfigDefaults.QueryWhereClause).String(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
app.Flag("collector.msmq.msmq-where", "WQL 'where' clause to use in WMI metrics query. "+
"Limits the response to the msmqs you specify and reduces the size of the response.").
Default(*c.config.QueryWhereClause).StringVar(c.config.QueryWhereClause)
return c
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
if utils.IsEmpty(c.queryWhereClause) {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
if *c.config.QueryWhereClause == "" {
_ = level.Warn(c.logger).Log("msg", "No where-clause specified for msmq collector. This will generate a very large number of metrics!")
}
c.BytesinJournalQueue = prometheus.NewDesc(
c.bytesInJournalQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_in_journal_queue"),
"Size of queue journal in bytes",
[]string{"name"},
nil,
)
c.BytesinQueue = prometheus.NewDesc(
c.bytesInQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_in_queue"),
"Size of queue in bytes",
[]string{"name"},
nil,
)
c.MessagesinJournalQueue = prometheus.NewDesc(
c.messagesInJournalQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_in_journal_queue"),
"Count messages in queue journal",
[]string{"name"},
nil,
)
c.MessagesinQueue = prometheus.NewDesc(
c.messagesInQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_in_queue"),
"Count messages in queue",
[]string{"name"},
@@ -105,7 +115,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting msmq metrics", "err", err)
return err
@@ -113,50 +123,52 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
return nil
}
type Win32_PerfRawData_MSMQ_MSMQQueue struct {
type msmqQueue struct {
Name string
BytesinJournalQueue uint64
BytesinQueue uint64
MessagesinJournalQueue uint64
MessagesinQueue uint64
BytesInJournalQueue uint64
BytesInQueue uint64
MessagesInJournalQueue uint64
MessagesInQueue uint64
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_MSMQ_MSMQQueue
q := wmi.QueryAllWhere(&dst, *c.queryWhereClause, c.logger)
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []msmqQueue
q := wmi.QueryAllForClassWhere(&dst, "Win32_PerfRawData_MSMQ_MSMQQueue", *c.config.QueryWhereClause, c.logger)
if err := wmi.Query(q, &dst); err != nil {
return err
}
for _, msmq := range dst {
ch <- prometheus.MustNewConstMetric(
c.BytesinJournalQueue,
c.bytesInJournalQueue,
prometheus.GaugeValue,
float64(msmq.BytesinJournalQueue),
float64(msmq.BytesInJournalQueue),
strings.ToLower(msmq.Name),
)
ch <- prometheus.MustNewConstMetric(
c.BytesinQueue,
c.bytesInQueue,
prometheus.GaugeValue,
float64(msmq.BytesinQueue),
float64(msmq.BytesInQueue),
strings.ToLower(msmq.Name),
)
ch <- prometheus.MustNewConstMetric(
c.MessagesinJournalQueue,
c.messagesInJournalQueue,
prometheus.GaugeValue,
float64(msmq.MessagesinJournalQueue),
float64(msmq.MessagesInJournalQueue),
strings.ToLower(msmq.Name),
)
ch <- prometheus.MustNewConstMetric(
c.MessagesinQueue,
c.messagesInQueue,
prometheus.GaugeValue,
float64(msmq.MessagesinQueue),
float64(msmq.MessagesInQueue),
strings.ToLower(msmq.Name),
)
}
return nil
}
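For context, a minimal usage sketch (not part of the diff) of the new pointer-based msmq Config: the ToPTR helper and package layout are assumed from the imports and repository structure above, and the WHERE clause is only an illustrative value.

package main

import (
	"github.com/go-kit/log"

	"github.com/prometheus-community/windows_exporter/pkg/collector/msmq"
	"github.com/prometheus-community/windows_exporter/pkg/utils"
)

func main() {
	// Passing a nil config would fall back to ConfigDefaults; an explicit
	// clause limits the WMI response, as the flag help above describes.
	c := msmq.New(log.NewNopLogger(), &msmq.Config{
		QueryWhereClause: utils.ToPTR("Name LIKE '%orders%'"),
	})
	if err := c.Build(); err != nil {
		panic(err)
	}
}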

View File

@@ -8,6 +8,6 @@ import (
)
func BenchmarkCollector(b *testing.B) {
// No context name required as collector source is WMI
// No context name required as Collector source is WMI
testutils.FuncBenchmarkCollector(b, msmq.Name, msmq.NewWithFlags)
}

File diff suppressed because it is too large

View File

@@ -14,188 +14,200 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "net"
FlagNicExclude = "collector.net.nic-exclude"
FlagNicInclude = "collector.net.nic-include"
)
const Name = "net"
type Config struct {
NicInclude string `yaml:"nic_include"`
NicExclude string `yaml:"nic_exclude"`
NicExclude *regexp.Regexp `yaml:"nic_exclude"`
NicInclude *regexp.Regexp `yaml:"nic_include"`
}
var ConfigDefaults = Config{
NicInclude: ".+",
NicExclude: "",
NicExclude: types.RegExpEmpty,
NicInclude: types.RegExpAny,
}
var nicNameToUnderscore = regexp.MustCompile("[^a-zA-Z0-9]")
// A collector is a Prometheus collector for Perflib Network Interface metrics
type collector struct {
// A Collector is a Prometheus Collector for Perflib Network Interface metrics.
type Collector struct {
config Config
logger log.Logger
nicInclude *string
nicExclude *string
BytesReceivedTotal *prometheus.Desc
BytesSentTotal *prometheus.Desc
BytesTotal *prometheus.Desc
OutputQueueLength *prometheus.Desc
PacketsOutboundDiscarded *prometheus.Desc
PacketsOutboundErrors *prometheus.Desc
PacketsTotal *prometheus.Desc
PacketsReceivedDiscarded *prometheus.Desc
PacketsReceivedErrors *prometheus.Desc
PacketsReceivedTotal *prometheus.Desc
PacketsReceivedUnknown *prometheus.Desc
PacketsSentTotal *prometheus.Desc
CurrentBandwidth *prometheus.Desc
nicIncludePattern *regexp.Regexp
nicExcludePattern *regexp.Regexp
bytesReceivedTotal *prometheus.Desc
bytesSentTotal *prometheus.Desc
bytesTotal *prometheus.Desc
outputQueueLength *prometheus.Desc
packetsOutboundDiscarded *prometheus.Desc
packetsOutboundErrors *prometheus.Desc
packetsTotal *prometheus.Desc
packetsReceivedDiscarded *prometheus.Desc
packetsReceivedErrors *prometheus.Desc
packetsReceivedTotal *prometheus.Desc
packetsReceivedUnknown *prometheus.Desc
packetsSentTotal *prometheus.Desc
currentBandwidth *prometheus.Desc
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
nicExclude: &config.NicExclude,
nicInclude: &config.NicInclude,
if config.NicExclude == nil {
config.NicExclude = ConfigDefaults.NicExclude
}
if config.NicInclude == nil {
config.NicInclude = ConfigDefaults.NicInclude
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
c := &collector{
nicInclude: app.Flag(
FlagNicInclude,
"Regexp of NIC:s to include. NIC name must both match include and not match exclude to be included.",
).Default(ConfigDefaults.NicInclude).String(),
nicExclude: app.Flag(
FlagNicExclude,
"Regexp of NIC:s to exclude. NIC name must both match include and not match exclude to be included.",
).Default(ConfigDefaults.NicExclude).String(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
var nicExclude, nicInclude string
app.Flag(
"collector.net.nic-exclude",
"Regexp of NIC:s to exclude. NIC name must both match include and not match exclude to be included.",
).Default(c.config.NicExclude.String()).StringVar(&nicExclude)
app.Flag(
"collector.net.nic-include",
"Regexp of NIC:s to include. NIC name must both match include and not match exclude to be included.",
).Default(c.config.NicInclude.String()).StringVar(&nicInclude)
app.Action(func(*kingpin.ParseContext) error {
var err error
c.config.NicExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", nicExclude))
if err != nil {
return fmt.Errorf("collector.net.nic-exclude: %w", err)
}
c.config.NicInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", nicInclude))
if err != nil {
return fmt.Errorf("collector.net.nic-include: %w", err)
}
return nil
})
return c
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Network Interface"}, nil
}
func (c *collector) Build() error {
c.BytesReceivedTotal = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.bytesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_received_total"),
"(Network.BytesReceivedPerSec)",
[]string{"nic"},
nil,
)
c.BytesSentTotal = prometheus.NewDesc(
c.bytesSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_sent_total"),
"(Network.BytesSentPerSec)",
[]string{"nic"},
nil,
)
c.BytesTotal = prometheus.NewDesc(
c.bytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_total"),
"(Network.BytesTotalPerSec)",
[]string{"nic"},
nil,
)
c.OutputQueueLength = prometheus.NewDesc(
c.outputQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "output_queue_length_packets"),
"(Network.OutputQueueLength)",
[]string{"nic"},
nil,
)
c.PacketsOutboundDiscarded = prometheus.NewDesc(
c.packetsOutboundDiscarded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_outbound_discarded_total"),
"(Network.PacketsOutboundDiscarded)",
[]string{"nic"},
nil,
)
c.PacketsOutboundErrors = prometheus.NewDesc(
c.packetsOutboundErrors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_outbound_errors_total"),
"(Network.PacketsOutboundErrors)",
[]string{"nic"},
nil,
)
c.PacketsReceivedDiscarded = prometheus.NewDesc(
c.packetsReceivedDiscarded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_received_discarded_total"),
"(Network.PacketsReceivedDiscarded)",
[]string{"nic"},
nil,
)
c.PacketsReceivedErrors = prometheus.NewDesc(
c.packetsReceivedErrors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_received_errors_total"),
"(Network.PacketsReceivedErrors)",
[]string{"nic"},
nil,
)
c.PacketsReceivedTotal = prometheus.NewDesc(
c.packetsReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
"(Network.PacketsReceivedPerSec)",
[]string{"nic"},
nil,
)
c.PacketsReceivedUnknown = prometheus.NewDesc(
c.packetsReceivedUnknown = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_received_unknown_total"),
"(Network.PacketsReceivedUnknown)",
[]string{"nic"},
nil,
)
c.PacketsTotal = prometheus.NewDesc(
c.packetsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_total"),
"(Network.PacketsPerSec)",
[]string{"nic"},
nil,
)
c.PacketsSentTotal = prometheus.NewDesc(
c.packetsSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_sent_total"),
"(Network.PacketsSentPerSec)",
[]string{"nic"},
nil,
)
c.CurrentBandwidth = prometheus.NewDesc(
c.currentBandwidth = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_bandwidth_bytes"),
"(Network.CurrentBandwidth)",
[]string{"nic"},
nil,
)
var err error
c.nicIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.nicInclude))
if err != nil {
return err
}
c.nicExcludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.nicExclude))
if err != nil {
return err
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting net metrics", "err", err)
return err
@@ -228,7 +240,7 @@ type networkInterface struct {
CurrentBandwidth float64 `perflib:"Current Bandwidth"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []networkInterface
if err := perflib.UnmarshalObject(ctx.PerfObjects["Network Interface"], &dst, c.logger); err != nil {
@@ -236,8 +248,8 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
}
for _, nic := range dst {
if c.nicExcludePattern.MatchString(nic.Name) ||
!c.nicIncludePattern.MatchString(nic.Name) {
if c.config.NicExclude.MatchString(nic.Name) ||
!c.config.NicInclude.MatchString(nic.Name) {
continue
}
@@ -248,83 +260,84 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
// Counters
ch <- prometheus.MustNewConstMetric(
c.BytesReceivedTotal,
c.bytesReceivedTotal,
prometheus.CounterValue,
nic.BytesReceivedPerSec,
name,
)
ch <- prometheus.MustNewConstMetric(
c.BytesSentTotal,
c.bytesSentTotal,
prometheus.CounterValue,
nic.BytesSentPerSec,
name,
)
ch <- prometheus.MustNewConstMetric(
c.BytesTotal,
c.bytesTotal,
prometheus.CounterValue,
nic.BytesTotalPerSec,
name,
)
ch <- prometheus.MustNewConstMetric(
c.OutputQueueLength,
c.outputQueueLength,
prometheus.GaugeValue,
nic.OutputQueueLength,
name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsOutboundDiscarded,
c.packetsOutboundDiscarded,
prometheus.CounterValue,
nic.PacketsOutboundDiscarded,
name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsOutboundErrors,
c.packetsOutboundErrors,
prometheus.CounterValue,
nic.PacketsOutboundErrors,
name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsTotal,
c.packetsTotal,
prometheus.CounterValue,
nic.PacketsPerSec,
name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsReceivedDiscarded,
c.packetsReceivedDiscarded,
prometheus.CounterValue,
nic.PacketsReceivedDiscarded,
name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsReceivedErrors,
c.packetsReceivedErrors,
prometheus.CounterValue,
nic.PacketsReceivedErrors,
name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsReceivedTotal,
c.packetsReceivedTotal,
prometheus.CounterValue,
nic.PacketsReceivedPerSec,
name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsReceivedUnknown,
c.packetsReceivedUnknown,
prometheus.CounterValue,
nic.PacketsReceivedUnknown,
name,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsSentTotal,
c.packetsSentTotal,
prometheus.CounterValue,
nic.PacketsSentPerSec,
name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentBandwidth,
c.currentBandwidth,
prometheus.GaugeValue,
nic.CurrentBandwidth/8,
name,
)
}
return nil
}
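A small standalone sketch (not part of the diff) of why both flag values are wrapped in ^(?:...)$ before compilation: the include/exclude patterns must match the whole NIC name rather than a substring. The NIC names below are invented examples.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	include := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", "Ethernet.*"))
	fmt.Println(include.MatchString("Ethernet 2"))          // true: the whole name matches
	fmt.Println(include.MatchString("vEthernet (Default)")) // false: anchored, so the leading "v" breaks the match
}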

View File

@@ -11,9 +11,9 @@ import (
)
func BenchmarkCollector(b *testing.B) {
// Include is not set in testing context (kingpin flags not parsed), causing the collector to skip all interfaces.
// NicInclude is not set in testing context (kingpin flags not parsed), causing the collector to skip all interfaces.
localNicInclude := ".+"
kingpin.CommandLine.GetArg(net.FlagNicInclude).StringVar(&localNicInclude)
kingpin.CommandLine.GetArg("collector.net.nic-include").StringVar(&localNicInclude)
testutils.FuncBenchmarkCollector(b, net.Name, net.NewWithFlags)
}

View File

@@ -7,6 +7,8 @@ import (
)
func TestNetworkToInstanceName(t *testing.T) {
t.Parallel()
data := map[string]string{
"Intel[R] Dual Band Wireless-AC 8260": "Intel_R__Dual_Band_Wireless_AC_8260",
}

View File

@@ -3,12 +3,11 @@
package netframework_clrexceptions
import (
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
@@ -18,58 +17,71 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics.
type Collector struct {
config Config
logger log.Logger
NumberofExcepsThrown *prometheus.Desc
NumberofFilters *prometheus.Desc
NumberofFinallys *prometheus.Desc
ThrowToCatchDepth *prometheus.Desc
numberOfExceptionsThrown *prometheus.Desc
numberOfFilters *prometheus.Desc
numberOfFinally *prometheus.Desc
throwToCatchDepth *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.NumberofExcepsThrown = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.numberOfExceptionsThrown = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "exceptions_thrown_total"),
"Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.",
[]string{"process"},
nil,
)
c.NumberofFilters = prometheus.NewDesc(
c.numberOfFilters = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "exceptions_filters_total"),
"Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled.",
[]string{"process"},
nil,
)
c.NumberofFinallys = prometheus.NewDesc(
c.numberOfFinally = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "exceptions_finallys_total"),
"Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter.",
[]string{"process"},
nil,
)
c.ThrowToCatchDepth = prometheus.NewDesc(
c.throwToCatchDepth = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "throw_to_catch_depth_total"),
"Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception.",
[]string{"process"},
@@ -80,7 +92,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrexceptions metrics", "err", err)
return err
@@ -98,7 +110,7 @@ type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {
ThrowToCatchDepthPersec uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -106,34 +118,33 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
for _, process := range dst {
if process.Name == "_Global_" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.NumberofExcepsThrown,
c.numberOfExceptionsThrown,
prometheus.CounterValue,
float64(process.NumberofExcepsThrown),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberofFilters,
c.numberOfFilters,
prometheus.CounterValue,
float64(process.NumberofFiltersPersec),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberofFinallys,
c.numberOfFinally,
prometheus.CounterValue,
float64(process.NumberofFinallysPersec),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ThrowToCatchDepth,
c.throwToCatchDepth,
prometheus.CounterValue,
float64(process.ThrowToCatchDepthPersec),
process.Name,

View File

@@ -8,6 +8,6 @@ import (
)
func BenchmarkCollector(b *testing.B) {
// No context name required as collector source is WMI
// No context name required as Collector source is WMI
testutils.FuncBenchmarkCollector(b, netframework_clrexceptions.Name, netframework_clrexceptions.NewWithFlags)
}
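Stepping back from the individual files: the refactor gives every collector the same exported surface (New, Build, Collect, Close), which is what the benchmark above exercises via NewWithFlags. Below is a hedged end-to-end sketch of that lifecycle, not part of the diff; the ScrapeContext is left nil because the WMI-backed collectors ignore it (note the underscore parameter in Collect above), and the import paths are assumed from the repository layout.

package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus-community/windows_exporter/pkg/collector/netframework_clrexceptions"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	c := netframework_clrexceptions.New(log.NewNopLogger(), nil) // nil config falls back to ConfigDefaults
	if err := c.Build(); err != nil {
		panic(err)
	}
	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		_ = c.Collect(nil, ch) // WMI collectors ignore the ScrapeContext argument
	}()
	for m := range ch {
		_ = m // in real use these are forwarded to the scrape handler
	}
	_ = c.Close()
}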

View File

@@ -3,12 +3,11 @@
package netframework_clrinterop
import (
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
@@ -18,51 +17,64 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics.
type Collector struct {
config Config
logger log.Logger
NumberofCCWs *prometheus.Desc
Numberofmarshalling *prometheus.Desc
NumberofStubs *prometheus.Desc
numberOfCCWs *prometheus.Desc
numberOfMarshalling *prometheus.Desc
numberOfStubs *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.NumberofCCWs = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.numberOfCCWs = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "com_callable_wrappers_total"),
"Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.",
[]string{"process"},
nil,
)
c.Numberofmarshalling = prometheus.NewDesc(
c.numberOfMarshalling = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "interop_marshalling_total"),
"Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started.",
[]string{"process"},
nil,
)
c.NumberofStubs = prometheus.NewDesc(
c.numberOfStubs = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "interop_stubs_created_total"),
"Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call.",
[]string{"process"},
@@ -73,7 +85,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrinterop metrics", "err", err)
return err
@@ -91,7 +103,7 @@ type Win32_PerfRawData_NETFramework_NETCLRInterop struct {
NumberofTLBimportsPersec uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -99,27 +111,26 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
for _, process := range dst {
if process.Name == "_Global_" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.NumberofCCWs,
c.numberOfCCWs,
prometheus.CounterValue,
float64(process.NumberofCCWs),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Numberofmarshalling,
c.numberOfMarshalling,
prometheus.CounterValue,
float64(process.Numberofmarshalling),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberofStubs,
c.numberOfStubs,
prometheus.CounterValue,
float64(process.NumberofStubs),
process.Name,

View File

@@ -17,58 +17,71 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics.
type Collector struct {
config Config
logger log.Logger
NumberofMethodsJitted *prometheus.Desc
TimeinJit *prometheus.Desc
StandardJitFailures *prometheus.Desc
TotalNumberofILBytesJitted *prometheus.Desc
numberOfMethodsJitted *prometheus.Desc
timeInJit *prometheus.Desc
standardJitFailures *prometheus.Desc
totalNumberOfILBytesJitted *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.NumberofMethodsJitted = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.numberOfMethodsJitted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "jit_methods_total"),
"Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.",
[]string{"process"},
nil,
)
c.TimeinJit = prometheus.NewDesc(
c.timeInJit = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "jit_time_percent"),
"Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled.",
[]string{"process"},
nil,
)
c.StandardJitFailures = prometheus.NewDesc(
c.standardJitFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "jit_standard_failures_total"),
"Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler.",
[]string{"process"},
nil,
)
c.TotalNumberofILBytesJitted = prometheus.NewDesc(
c.totalNumberOfILBytesJitted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "jit_il_bytes_total"),
"Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started",
[]string{"process"},
@@ -79,7 +92,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrjit metrics", "err", err)
return err
@@ -99,7 +112,7 @@ type Win32_PerfRawData_NETFramework_NETCLRJit struct {
TotalNumberofILBytesJitted uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRJit
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -107,34 +120,33 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
for _, process := range dst {
if process.Name == "_Global_" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.NumberofMethodsJitted,
c.numberOfMethodsJitted,
prometheus.CounterValue,
float64(process.NumberofMethodsJitted),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TimeinJit,
c.timeInJit,
prometheus.GaugeValue,
float64(process.PercentTimeinJit)/float64(process.Frequency_PerfTime),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.StandardJitFailures,
c.standardJitFailures,
prometheus.GaugeValue,
float64(process.StandardJitFailures),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalNumberofILBytesJitted,
c.totalNumberOfILBytesJitted,
prometheus.CounterValue,
float64(process.TotalNumberofILBytesJitted),
process.Name,

View File

@@ -17,93 +17,106 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics.
type Collector struct {
config Config
logger log.Logger
BytesinLoaderHeap *prometheus.Desc
Currentappdomains *prometheus.Desc
CurrentAssemblies *prometheus.Desc
CurrentClassesLoaded *prometheus.Desc
TotalAppdomains *prometheus.Desc
Totalappdomainsunloaded *prometheus.Desc
TotalAssemblies *prometheus.Desc
TotalClassesLoaded *prometheus.Desc
TotalNumberofLoadFailures *prometheus.Desc
bytesInLoaderHeap *prometheus.Desc
currentAppDomains *prometheus.Desc
currentAssemblies *prometheus.Desc
currentClassesLoaded *prometheus.Desc
totalAppDomains *prometheus.Desc
totalAppDomainsUnloaded *prometheus.Desc
totalAssemblies *prometheus.Desc
totalClassesLoaded *prometheus.Desc
totalNumberOfLoadFailures *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.BytesinLoaderHeap = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.bytesInLoaderHeap = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "loader_heap_size_bytes"),
"Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.",
[]string{"process"},
nil,
)
c.Currentappdomains = prometheus.NewDesc(
c.currentAppDomains = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "appdomains_loaded_current"),
"Displays the current number of application domains loaded in this application.",
[]string{"process"},
nil,
)
c.CurrentAssemblies = prometheus.NewDesc(
c.currentAssemblies = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "assemblies_loaded_current"),
"Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
[]string{"process"},
nil,
)
c.CurrentClassesLoaded = prometheus.NewDesc(
c.currentClassesLoaded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "classes_loaded_current"),
"Displays the current number of classes loaded in all assemblies.",
[]string{"process"},
nil,
)
c.TotalAppdomains = prometheus.NewDesc(
c.totalAppDomains = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "appdomains_loaded_total"),
"Displays the peak number of application domains loaded since the application started.",
[]string{"process"},
nil,
)
c.Totalappdomainsunloaded = prometheus.NewDesc(
c.totalAppDomainsUnloaded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "appdomains_unloaded_total"),
"Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded.",
[]string{"process"},
nil,
)
c.TotalAssemblies = prometheus.NewDesc(
c.totalAssemblies = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "assemblies_loaded_total"),
"Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
[]string{"process"},
nil,
)
c.TotalClassesLoaded = prometheus.NewDesc(
c.totalClassesLoaded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "classes_loaded_total"),
"Displays the cumulative number of classes loaded in all assemblies since the application started.",
[]string{"process"},
nil,
)
c.TotalNumberofLoadFailures = prometheus.NewDesc(
c.totalNumberOfLoadFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "class_load_failures_total"),
"Displays the peak number of classes that have failed to load since the application started.",
[]string{"process"},
@@ -114,7 +127,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrloading metrics", "err", err)
return err
@@ -143,7 +156,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLoading struct {
TotalNumberofLoadFailures uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRLoading
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -151,69 +164,68 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
for _, process := range dst {
if process.Name == "_Global_" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.BytesinLoaderHeap,
c.bytesInLoaderHeap,
prometheus.GaugeValue,
float64(process.BytesinLoaderHeap),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Currentappdomains,
c.currentAppDomains,
prometheus.GaugeValue,
float64(process.Currentappdomains),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentAssemblies,
c.currentAssemblies,
prometheus.GaugeValue,
float64(process.CurrentAssemblies),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentClassesLoaded,
c.currentClassesLoaded,
prometheus.GaugeValue,
float64(process.CurrentClassesLoaded),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalAppdomains,
c.totalAppDomains,
prometheus.CounterValue,
float64(process.TotalAppdomains),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Totalappdomainsunloaded,
c.totalAppDomainsUnloaded,
prometheus.CounterValue,
float64(process.Totalappdomainsunloaded),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalAssemblies,
c.totalAssemblies,
prometheus.CounterValue,
float64(process.TotalAssemblies),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalClassesLoaded,
c.totalClassesLoaded,
prometheus.CounterValue,
float64(process.TotalClassesLoaded),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalNumberofLoadFailures,
c.totalNumberOfLoadFailures,
prometheus.CounterValue,
float64(process.TotalNumberofLoadFailures),
process.Name,

View File

@@ -17,79 +17,92 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics.
type Collector struct {
config Config
logger log.Logger
CurrentQueueLength *prometheus.Desc
NumberofcurrentlogicalThreads *prometheus.Desc
NumberofcurrentphysicalThreads *prometheus.Desc
Numberofcurrentrecognizedthreads *prometheus.Desc
Numberoftotalrecognizedthreads *prometheus.Desc
QueueLengthPeak *prometheus.Desc
TotalNumberofContentions *prometheus.Desc
currentQueueLength *prometheus.Desc
numberOfCurrentLogicalThreads *prometheus.Desc
numberOfCurrentPhysicalThreads *prometheus.Desc
numberOfCurrentRecognizedThreads *prometheus.Desc
numberOfTotalRecognizedThreads *prometheus.Desc
queueLengthPeak *prometheus.Desc
totalNumberOfContentions *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.CurrentQueueLength = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.currentQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_queue_length"),
"Displays the total number of threads that are currently waiting to acquire a managed lock in the application.",
[]string{"process"},
nil,
)
c.NumberofcurrentlogicalThreads = prometheus.NewDesc(
c.numberOfCurrentLogicalThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_logical_threads"),
"Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads. ",
[]string{"process"},
nil,
)
c.NumberofcurrentphysicalThreads = prometheus.NewDesc(
c.numberOfCurrentPhysicalThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "physical_threads_current"),
"Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process.",
[]string{"process"},
nil,
)
c.Numberofcurrentrecognizedthreads = prometheus.NewDesc(
c.numberOfCurrentRecognizedThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "recognized_threads_current"),
"Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
[]string{"process"},
nil,
)
c.Numberoftotalrecognizedthreads = prometheus.NewDesc(
c.numberOfTotalRecognizedThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "recognized_threads_total"),
"Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
[]string{"process"},
nil,
)
c.QueueLengthPeak = prometheus.NewDesc(
c.queueLengthPeak = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "queue_length_total"),
"Displays the total number of threads that waited to acquire a managed lock since the application started.",
[]string{"process"},
nil,
)
c.TotalNumberofContentions = prometheus.NewDesc(
c.totalNumberOfContentions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "contentions_total"),
"Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully.",
[]string{"process"},
@@ -100,7 +113,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics", "err", err)
return err
@@ -123,7 +136,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads struct {
TotalNumberofContentions uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -131,55 +144,54 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
for _, process := range dst {
if process.Name == "_Global_" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.CurrentQueueLength,
c.currentQueueLength,
prometheus.GaugeValue,
float64(process.CurrentQueueLength),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberofcurrentlogicalThreads,
c.numberOfCurrentLogicalThreads,
prometheus.GaugeValue,
float64(process.NumberofcurrentlogicalThreads),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberofcurrentphysicalThreads,
c.numberOfCurrentPhysicalThreads,
prometheus.GaugeValue,
float64(process.NumberofcurrentphysicalThreads),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Numberofcurrentrecognizedthreads,
c.numberOfCurrentRecognizedThreads,
prometheus.GaugeValue,
float64(process.Numberofcurrentrecognizedthreads),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Numberoftotalrecognizedthreads,
c.numberOfTotalRecognizedThreads,
prometheus.CounterValue,
float64(process.Numberoftotalrecognizedthreads),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.QueueLengthPeak,
c.queueLengthPeak,
prometheus.CounterValue,
float64(process.QueueLengthPeak),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalNumberofContentions,
c.totalNumberOfContentions,
prometheus.CounterValue,
float64(process.TotalNumberofContentions),
process.Name,

View File

@@ -17,117 +17,127 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics.
type Collector struct {
config Config
logger log.Logger
AllocatedBytes *prometheus.Desc
FinalizationSurvivors *prometheus.Desc
HeapSize *prometheus.Desc
PromotedBytes *prometheus.Desc
NumberGCHandles *prometheus.Desc
NumberCollections *prometheus.Desc
NumberInducedGC *prometheus.Desc
NumberofPinnedObjects *prometheus.Desc
NumberofSinkBlocksinuse *prometheus.Desc
NumberTotalCommittedBytes *prometheus.Desc
NumberTotalreservedBytes *prometheus.Desc
TimeinGC *prometheus.Desc
PromotedFinalizationMemoryfromGen0 *prometheus.Desc
PromotedMemoryfromGen0 *prometheus.Desc
PromotedMemoryfromGen1 *prometheus.Desc
allocatedBytes *prometheus.Desc
finalizationSurvivors *prometheus.Desc
heapSize *prometheus.Desc
promotedBytes *prometheus.Desc
numberGCHandles *prometheus.Desc
numberCollections *prometheus.Desc
numberInducedGC *prometheus.Desc
numberOfPinnedObjects *prometheus.Desc
numberOfSinkBlocksInUse *prometheus.Desc
numberTotalCommittedBytes *prometheus.Desc
numberTotalReservedBytes *prometheus.Desc
timeInGC *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.AllocatedBytes = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.allocatedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "allocated_bytes_total"),
"Displays the total number of bytes allocated on the garbage collection heap.",
[]string{"process"},
nil,
)
c.FinalizationSurvivors = prometheus.NewDesc(
c.finalizationSurvivors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "finalization_survivors"),
"Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized.",
[]string{"process"},
nil,
)
c.HeapSize = prometheus.NewDesc(
c.heapSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "heap_size_bytes"),
"Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated.",
[]string{"process", "area"},
nil,
)
c.PromotedBytes = prometheus.NewDesc(
c.promotedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "promoted_bytes"),
"Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection.",
[]string{"process", "area"},
nil,
)
c.NumberGCHandles = prometheus.NewDesc(
c.numberGCHandles = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "number_gc_handles"),
"Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment.",
[]string{"process"},
nil,
)
c.NumberCollections = prometheus.NewDesc(
c.numberCollections = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "collections_total"),
"Displays the number of times the generation objects are garbage collected since the application started.",
[]string{"process", "area"},
nil,
)
c.NumberInducedGC = prometheus.NewDesc(
c.numberInducedGC = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "induced_gc_total"),
"Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect.",
[]string{"process"},
nil,
)
c.NumberofPinnedObjects = prometheus.NewDesc(
c.numberOfPinnedObjects = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "number_pinned_objects"),
"Displays the number of pinned objects encountered in the last garbage collection.",
[]string{"process"},
nil,
)
c.NumberofSinkBlocksinuse = prometheus.NewDesc(
c.numberOfSinkBlocksInUse = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "number_sink_blocksinuse"),
"Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector.",
[]string{"process"},
nil,
)
c.NumberTotalCommittedBytes = prometheus.NewDesc(
c.numberTotalCommittedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "committed_bytes"),
"Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file.",
[]string{"process"},
nil,
)
c.NumberTotalreservedBytes = prometheus.NewDesc(
c.numberTotalReservedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "reserved_bytes"),
"Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used.",
[]string{"process"},
nil,
)
c.TimeinGC = prometheus.NewDesc(
c.timeInGC = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "gc_time_percent"),
"Displays the percentage of time that was spent performing a garbage collection in the last sample.",
[]string{"process"},
@@ -138,7 +148,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrmemory metrics", "err", err)
return err
@@ -180,7 +190,7 @@ type Win32_PerfRawData_NETFramework_NETCLRMemory struct {
PromotedMemoryfromGen1 uint64
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRMemory
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -188,27 +198,26 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
for _, process := range dst {
if process.Name == "_Global_" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.AllocatedBytes,
c.allocatedBytes,
prometheus.CounterValue,
float64(process.AllocatedBytesPersec),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FinalizationSurvivors,
c.finalizationSurvivors,
prometheus.GaugeValue,
float64(process.FinalizationSurvivors),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.HeapSize,
c.heapSize,
prometheus.GaugeValue,
float64(process.Gen0heapsize),
process.Name,
@@ -216,7 +225,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.PromotedBytes,
c.promotedBytes,
prometheus.GaugeValue,
float64(process.Gen0PromotedBytesPerSec),
process.Name,
@@ -224,7 +233,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.HeapSize,
c.heapSize,
prometheus.GaugeValue,
float64(process.Gen1heapsize),
process.Name,
@@ -232,7 +241,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.PromotedBytes,
c.promotedBytes,
prometheus.GaugeValue,
float64(process.Gen1PromotedBytesPerSec),
process.Name,
@@ -240,7 +249,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.HeapSize,
c.heapSize,
prometheus.GaugeValue,
float64(process.Gen2heapsize),
process.Name,
@@ -248,7 +257,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.HeapSize,
c.heapSize,
prometheus.GaugeValue,
float64(process.LargeObjectHeapsize),
process.Name,
@@ -256,14 +265,14 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.NumberGCHandles,
c.numberGCHandles,
prometheus.GaugeValue,
float64(process.NumberGCHandles),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberCollections,
c.numberCollections,
prometheus.CounterValue,
float64(process.NumberGen0Collections),
process.Name,
@@ -271,7 +280,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.NumberCollections,
c.numberCollections,
prometheus.CounterValue,
float64(process.NumberGen1Collections),
process.Name,
@@ -279,7 +288,7 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.NumberCollections,
c.numberCollections,
prometheus.CounterValue,
float64(process.NumberGen2Collections),
process.Name,
@@ -287,42 +296,42 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.NumberInducedGC,
c.numberInducedGC,
prometheus.CounterValue,
float64(process.NumberInducedGC),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberofPinnedObjects,
c.numberOfPinnedObjects,
prometheus.GaugeValue,
float64(process.NumberofPinnedObjects),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberofSinkBlocksinuse,
c.numberOfSinkBlocksInUse,
prometheus.GaugeValue,
float64(process.NumberofSinkBlocksinuse),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberTotalCommittedBytes,
c.numberTotalCommittedBytes,
prometheus.GaugeValue,
float64(process.NumberTotalcommittedBytes),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NumberTotalreservedBytes,
c.numberTotalReservedBytes,
prometheus.GaugeValue,
float64(process.NumberTotalreservedBytes),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TimeinGC,
c.timeInGC,
prometheus.GaugeValue,
float64(100*process.PercentTimeinGC)/float64(process.PercentTimeinGC_base),
process.Name,

View File

@@ -17,72 +17,85 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics.
type Collector struct {
config Config
logger log.Logger
Channels *prometheus.Desc
ContextBoundClassesLoaded *prometheus.Desc
ContextBoundObjects *prometheus.Desc
ContextProxies *prometheus.Desc
Contexts *prometheus.Desc
TotalRemoteCalls *prometheus.Desc
channels *prometheus.Desc
contextBoundClassesLoaded *prometheus.Desc
contextBoundObjects *prometheus.Desc
contextProxies *prometheus.Desc
contexts *prometheus.Desc
totalRemoteCalls *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.Channels = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.channels = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "channels_total"),
"Displays the total number of remoting channels registered across all application domains since application started.",
[]string{"process"},
nil,
)
c.ContextBoundClassesLoaded = prometheus.NewDesc(
c.contextBoundClassesLoaded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "context_bound_classes_loaded"),
"Displays the current number of context-bound classes that are loaded.",
[]string{"process"},
nil,
)
c.ContextBoundObjects = prometheus.NewDesc(
c.contextBoundObjects = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "context_bound_objects_total"),
"Displays the total number of context-bound objects allocated.",
[]string{"process"},
nil,
)
c.ContextProxies = prometheus.NewDesc(
c.contextProxies = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "context_proxies_total"),
"Displays the total number of remoting proxy objects in this process since it started.",
[]string{"process"},
nil,
)
c.Contexts = prometheus.NewDesc(
c.contexts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "contexts"),
"Displays the current number of remoting contexts in the application.",
[]string{"process"},
nil,
)
c.TotalRemoteCalls = prometheus.NewDesc(
c.totalRemoteCalls = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "remote_calls_total"),
"Displays the total number of remote procedure calls invoked since the application started.",
[]string{"process"},
@@ -93,7 +106,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrremoting metrics", "err", err)
return err
@@ -113,7 +126,7 @@ type Win32_PerfRawData_NETFramework_NETCLRRemoting struct {
TotalRemoteCalls uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -121,48 +134,47 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
for _, process := range dst {
if process.Name == "_Global_" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.Channels,
c.channels,
prometheus.CounterValue,
float64(process.Channels),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ContextBoundClassesLoaded,
c.contextBoundClassesLoaded,
prometheus.GaugeValue,
float64(process.ContextBoundClassesLoaded),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ContextBoundObjects,
c.contextBoundObjects,
prometheus.CounterValue,
float64(process.ContextBoundObjectsAllocPersec),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ContextProxies,
c.contextProxies,
prometheus.CounterValue,
float64(process.ContextProxies),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.Contexts,
c.contexts,
prometheus.GaugeValue,
float64(process.Contexts),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalRemoteCalls,
c.totalRemoteCalls,
prometheus.CounterValue,
float64(process.TotalRemoteCalls),
process.Name,

View File

@@ -17,58 +17,71 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics.
type Collector struct {
config Config
logger log.Logger
NumberLinkTimeChecks *prometheus.Desc
TimeinRTchecks *prometheus.Desc
StackWalkDepth *prometheus.Desc
TotalRuntimeChecks *prometheus.Desc
numberLinkTimeChecks *prometheus.Desc
timeInRTChecks *prometheus.Desc
stackWalkDepth *prometheus.Desc
totalRuntimeChecks *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.NumberLinkTimeChecks = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.numberLinkTimeChecks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "link_time_checks_total"),
"Displays the total number of link-time code access security checks since the application started.",
[]string{"process"},
nil,
)
c.TimeinRTchecks = prometheus.NewDesc(
c.timeInRTChecks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rt_checks_time_percent"),
"Displays the percentage of time spent performing runtime code access security checks in the last sample.",
[]string{"process"},
nil,
)
c.StackWalkDepth = prometheus.NewDesc(
c.stackWalkDepth = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "stack_walk_depth"),
"Displays the depth of the stack during that last runtime code access security check.",
[]string{"process"},
nil,
)
c.TotalRuntimeChecks = prometheus.NewDesc(
c.totalRuntimeChecks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "runtime_checks_total"),
"Displays the total number of runtime code access security checks performed since the application started.",
[]string{"process"},
@@ -79,7 +92,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrsecurity metrics", "err", err)
return err
@@ -98,7 +111,7 @@ type Win32_PerfRawData_NETFramework_NETCLRSecurity struct {
TotalRuntimeChecks uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -106,34 +119,33 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
for _, process := range dst {
if process.Name == "_Global_" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.NumberLinkTimeChecks,
c.numberLinkTimeChecks,
prometheus.CounterValue,
float64(process.NumberLinkTimeChecks),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TimeinRTchecks,
c.timeInRTChecks,
prometheus.GaugeValue,
float64(process.PercentTimeinRTchecks)/float64(process.Frequency_PerfTime),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.StackWalkDepth,
c.stackWalkDepth,
prometheus.GaugeValue,
float64(process.StackWalkDepth),
process.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalRuntimeChecks,
c.totalRuntimeChecks,
prometheus.CounterValue,
float64(process.TotalRuntimeChecks),
process.Name,

View File

@@ -17,207 +17,220 @@ type Config struct{}
var ConfigDefaults = Config{}
// collector is a Prometheus collector for WMI Win32_PerfRawData_IAS_NPSAuthenticationServer and Win32_PerfRawData_IAS_NPSAccountingServer metrics
type collector struct {
// Collector is a Prometheus Collector for WMI Win32_PerfRawData_IAS_NPSAuthenticationServer and Win32_PerfRawData_IAS_NPSAccountingServer metrics.
type Collector struct {
config Config
logger log.Logger
AccessAccepts *prometheus.Desc
AccessChallenges *prometheus.Desc
AccessRejects *prometheus.Desc
AccessRequests *prometheus.Desc
AccessBadAuthenticators *prometheus.Desc
AccessDroppedPackets *prometheus.Desc
AccessInvalidRequests *prometheus.Desc
AccessMalformedPackets *prometheus.Desc
AccessPacketsReceived *prometheus.Desc
AccessPacketsSent *prometheus.Desc
AccessServerResetTime *prometheus.Desc
AccessServerUpTime *prometheus.Desc
AccessUnknownType *prometheus.Desc
accessAccepts *prometheus.Desc
accessChallenges *prometheus.Desc
accessRejects *prometheus.Desc
accessRequests *prometheus.Desc
accessBadAuthenticators *prometheus.Desc
accessDroppedPackets *prometheus.Desc
accessInvalidRequests *prometheus.Desc
accessMalformedPackets *prometheus.Desc
accessPacketsReceived *prometheus.Desc
accessPacketsSent *prometheus.Desc
accessServerResetTime *prometheus.Desc
accessServerUpTime *prometheus.Desc
accessUnknownType *prometheus.Desc
AccountingRequests *prometheus.Desc
AccountingResponses *prometheus.Desc
AccountingBadAuthenticators *prometheus.Desc
AccountingDroppedPackets *prometheus.Desc
AccountingInvalidRequests *prometheus.Desc
AccountingMalformedPackets *prometheus.Desc
AccountingNoRecord *prometheus.Desc
AccountingPacketsReceived *prometheus.Desc
AccountingPacketsSent *prometheus.Desc
AccountingServerResetTime *prometheus.Desc
AccountingServerUpTime *prometheus.Desc
AccountingUnknownType *prometheus.Desc
accountingRequests *prometheus.Desc
accountingResponses *prometheus.Desc
accountingBadAuthenticators *prometheus.Desc
accountingDroppedPackets *prometheus.Desc
accountingInvalidRequests *prometheus.Desc
accountingMalformedPackets *prometheus.Desc
accountingNoRecord *prometheus.Desc
accountingPacketsReceived *prometheus.Desc
accountingPacketsSent *prometheus.Desc
accountingServerResetTime *prometheus.Desc
accountingServerUpTime *prometheus.Desc
accountingUnknownType *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.AccessAccepts = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.accessAccepts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_accepts"),
"(AccessAccepts)",
nil,
nil,
)
c.AccessChallenges = prometheus.NewDesc(
c.accessChallenges = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_challenges"),
"(AccessChallenges)",
nil,
nil,
)
c.AccessRejects = prometheus.NewDesc(
c.accessRejects = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_rejects"),
"(AccessRejects)",
nil,
nil,
)
c.AccessRequests = prometheus.NewDesc(
c.accessRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_requests"),
"(AccessRequests)",
nil,
nil,
)
c.AccessBadAuthenticators = prometheus.NewDesc(
c.accessBadAuthenticators = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_bad_authenticators"),
"(BadAuthenticators)",
nil,
nil,
)
c.AccessDroppedPackets = prometheus.NewDesc(
c.accessDroppedPackets = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_dropped_packets"),
"(DroppedPackets)",
nil,
nil,
)
c.AccessInvalidRequests = prometheus.NewDesc(
c.accessInvalidRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_invalid_requests"),
"(InvalidRequests)",
nil,
nil,
)
c.AccessMalformedPackets = prometheus.NewDesc(
c.accessMalformedPackets = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_malformed_packets"),
"(MalformedPackets)",
nil,
nil,
)
c.AccessPacketsReceived = prometheus.NewDesc(
c.accessPacketsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_packets_received"),
"(PacketsReceived)",
nil,
nil,
)
c.AccessPacketsSent = prometheus.NewDesc(
c.accessPacketsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_packets_sent"),
"(PacketsSent)",
nil,
nil,
)
c.AccessServerResetTime = prometheus.NewDesc(
c.accessServerResetTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_server_reset_time"),
"(ServerResetTime)",
nil,
nil,
)
c.AccessServerUpTime = prometheus.NewDesc(
c.accessServerUpTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_server_up_time"),
"(ServerUpTime)",
nil,
nil,
)
c.AccessUnknownType = prometheus.NewDesc(
c.accessUnknownType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_unknown_type"),
"(UnknownType)",
nil,
nil,
)
c.AccountingRequests = prometheus.NewDesc(
c.accountingRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_requests"),
"(AccountingRequests)",
nil,
nil,
)
c.AccountingResponses = prometheus.NewDesc(
c.accountingResponses = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_responses"),
"(AccountingResponses)",
nil,
nil,
)
c.AccountingBadAuthenticators = prometheus.NewDesc(
c.accountingBadAuthenticators = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_bad_authenticators"),
"(BadAuthenticators)",
nil,
nil,
)
c.AccountingDroppedPackets = prometheus.NewDesc(
c.accountingDroppedPackets = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_dropped_packets"),
"(DroppedPackets)",
nil,
nil,
)
c.AccountingInvalidRequests = prometheus.NewDesc(
c.accountingInvalidRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_invalid_requests"),
"(InvalidRequests)",
nil,
nil,
)
c.AccountingMalformedPackets = prometheus.NewDesc(
c.accountingMalformedPackets = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_malformed_packets"),
"(MalformedPackets)",
nil,
nil,
)
c.AccountingNoRecord = prometheus.NewDesc(
c.accountingNoRecord = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_no_record"),
"(NoRecord)",
nil,
nil,
)
c.AccountingPacketsReceived = prometheus.NewDesc(
c.accountingPacketsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_packets_received"),
"(PacketsReceived)",
nil,
nil,
)
c.AccountingPacketsSent = prometheus.NewDesc(
c.accountingPacketsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_packets_sent"),
"(PacketsSent)",
nil,
nil,
)
c.AccountingServerResetTime = prometheus.NewDesc(
c.accountingServerResetTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_server_reset_time"),
"(ServerResetTime)",
nil,
nil,
)
c.AccountingServerUpTime = prometheus.NewDesc(
c.accountingServerUpTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_server_up_time"),
"(ServerUpTime)",
nil,
nil,
)
c.AccountingUnknownType = prometheus.NewDesc(
c.accountingUnknownType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accounting_unknown_type"),
"(UnknownType)",
nil,
@@ -228,7 +241,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.CollectAccept(ch); err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("failed collecting NPS accept data: %s", err))
return err
@@ -241,7 +254,7 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
}
// Win32_PerfRawData_IAS_NPSAuthenticationServer docs:
// at the moment there is no Microsoft documentation
// at the moment there is no Microsoft documentation.
type Win32_PerfRawData_IAS_NPSAuthenticationServer struct {
Name string
@@ -279,7 +292,7 @@ type Win32_PerfRawData_IAS_NPSAccountingServer struct {
// CollectAccept sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) CollectAccept(ch chan<- prometheus.Metric) error {
func (c *Collector) CollectAccept(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_IAS_NPSAuthenticationServer
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -287,79 +300,79 @@ func (c *collector) CollectAccept(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.AccessAccepts,
c.accessAccepts,
prometheus.CounterValue,
float64(dst[0].AccessAccepts),
)
ch <- prometheus.MustNewConstMetric(
c.AccessChallenges,
c.accessChallenges,
prometheus.CounterValue,
float64(dst[0].AccessChallenges),
)
ch <- prometheus.MustNewConstMetric(
c.AccessRejects,
c.accessRejects,
prometheus.CounterValue,
float64(dst[0].AccessRejects),
)
ch <- prometheus.MustNewConstMetric(
c.AccessRequests,
c.accessRequests,
prometheus.CounterValue,
float64(dst[0].AccessRequests),
)
ch <- prometheus.MustNewConstMetric(
c.AccessBadAuthenticators,
c.accessBadAuthenticators,
prometheus.CounterValue,
float64(dst[0].AccessBadAuthenticators),
)
ch <- prometheus.MustNewConstMetric(
c.AccessDroppedPackets,
c.accessDroppedPackets,
prometheus.CounterValue,
float64(dst[0].AccessDroppedPackets),
)
ch <- prometheus.MustNewConstMetric(
c.AccessInvalidRequests,
c.accessInvalidRequests,
prometheus.CounterValue,
float64(dst[0].AccessInvalidRequests),
)
ch <- prometheus.MustNewConstMetric(
c.AccessMalformedPackets,
c.accessMalformedPackets,
prometheus.CounterValue,
float64(dst[0].AccessMalformedPackets),
)
ch <- prometheus.MustNewConstMetric(
c.AccessPacketsReceived,
c.accessPacketsReceived,
prometheus.CounterValue,
float64(dst[0].AccessPacketsReceived),
)
ch <- prometheus.MustNewConstMetric(
c.AccessPacketsSent,
c.accessPacketsSent,
prometheus.CounterValue,
float64(dst[0].AccessPacketsSent),
)
ch <- prometheus.MustNewConstMetric(
c.AccessServerResetTime,
c.accessServerResetTime,
prometheus.CounterValue,
float64(dst[0].AccessServerResetTime),
)
ch <- prometheus.MustNewConstMetric(
c.AccessServerUpTime,
c.accessServerUpTime,
prometheus.CounterValue,
float64(dst[0].AccessServerUpTime),
)
ch <- prometheus.MustNewConstMetric(
c.AccessUnknownType,
c.accessUnknownType,
prometheus.CounterValue,
float64(dst[0].AccessUnknownType),
)
@@ -367,7 +380,7 @@ func (c *collector) CollectAccept(ch chan<- prometheus.Metric) error {
return nil
}
func (c *collector) CollectAccounting(ch chan<- prometheus.Metric) error {
func (c *Collector) CollectAccounting(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_IAS_NPSAccountingServer
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -375,73 +388,73 @@ func (c *collector) CollectAccounting(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.AccountingRequests,
c.accountingRequests,
prometheus.CounterValue,
float64(dst[0].AccountingRequests),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingResponses,
c.accountingResponses,
prometheus.CounterValue,
float64(dst[0].AccountingResponses),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingBadAuthenticators,
c.accountingBadAuthenticators,
prometheus.CounterValue,
float64(dst[0].AccountingBadAuthenticators),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingDroppedPackets,
c.accountingDroppedPackets,
prometheus.CounterValue,
float64(dst[0].AccountingDroppedPackets),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingInvalidRequests,
c.accountingInvalidRequests,
prometheus.CounterValue,
float64(dst[0].AccountingInvalidRequests),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingMalformedPackets,
c.accountingMalformedPackets,
prometheus.CounterValue,
float64(dst[0].AccountingMalformedPackets),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingNoRecord,
c.accountingNoRecord,
prometheus.CounterValue,
float64(dst[0].AccountingNoRecord),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingPacketsReceived,
c.accountingPacketsReceived,
prometheus.CounterValue,
float64(dst[0].AccountingPacketsReceived),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingPacketsSent,
c.accountingPacketsSent,
prometheus.CounterValue,
float64(dst[0].AccountingPacketsSent),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingServerResetTime,
c.accountingServerResetTime,
prometheus.CounterValue,
float64(dst[0].AccountingServerResetTime),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingServerUpTime,
c.accountingServerUpTime,
prometheus.CounterValue,
float64(dst[0].AccountingServerUpTime),
)
ch <- prometheus.MustNewConstMetric(
c.AccountingUnknownType,
c.accountingUnknownType,
prometheus.CounterValue,
float64(dst[0].AccountingUnknownType),
)

View File

@@ -11,16 +11,15 @@ import (
"syscall"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/headers/kernel32"
"github.com/prometheus-community/windows_exporter/pkg/headers/netapi32"
"github.com/prometheus-community/windows_exporter/pkg/headers/psapi"
"github.com/prometheus-community/windows_exporter/pkg/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows/registry"
)
@@ -31,23 +30,24 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI metrics.
type Collector struct {
config Config
logger log.Logger
OSInformation *prometheus.Desc
PhysicalMemoryFreeBytes *prometheus.Desc
PagingFreeBytes *prometheus.Desc
VirtualMemoryFreeBytes *prometheus.Desc
ProcessesLimit *prometheus.Desc
ProcessMemoryLimitBytes *prometheus.Desc
Processes *prometheus.Desc
Users *prometheus.Desc
PagingLimitBytes *prometheus.Desc
VirtualMemoryBytes *prometheus.Desc
VisibleMemoryBytes *prometheus.Desc
Time *prometheus.Desc
Timezone *prometheus.Desc
osInformation *prometheus.Desc
pagingFreeBytes *prometheus.Desc
pagingLimitBytes *prometheus.Desc
physicalMemoryFreeBytes *prometheus.Desc
processMemoryLimitBytes *prometheus.Desc
processes *prometheus.Desc
processesLimit *prometheus.Desc
time *prometheus.Desc
timezone *prometheus.Desc
users *prometheus.Desc
virtualMemoryBytes *prometheus.Desc
virtualMemoryFreeBytes *prometheus.Desc
visibleMemoryBytes *prometheus.Desc
}
type pagingFileCounter struct {
@@ -56,102 +56,114 @@ type pagingFileCounter struct {
UsagePeak float64 `perflib:"% Usage Peak"`
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Paging File"}, nil
}
func (c *collector) Build() error {
c.OSInformation = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.osInformation = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"OperatingSystem.Caption, OperatingSystem.Version",
[]string{"product", "version", "major_version", "minor_version", "build_number", "revision"},
nil,
)
c.PagingLimitBytes = prometheus.NewDesc(
c.pagingLimitBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "paging_limit_bytes"),
"OperatingSystem.SizeStoredInPagingFiles",
nil,
nil,
)
c.PagingFreeBytes = prometheus.NewDesc(
c.pagingFreeBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "paging_free_bytes"),
"OperatingSystem.FreeSpaceInPagingFiles",
nil,
nil,
)
c.PhysicalMemoryFreeBytes = prometheus.NewDesc(
c.physicalMemoryFreeBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "physical_memory_free_bytes"),
"OperatingSystem.FreePhysicalMemory",
nil,
nil,
)
c.Time = prometheus.NewDesc(
c.time = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "time"),
"OperatingSystem.LocalDateTime",
nil,
nil,
)
c.Timezone = prometheus.NewDesc(
c.timezone = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "timezone"),
"OperatingSystem.LocalDateTime",
[]string{"timezone"},
nil,
)
c.Processes = prometheus.NewDesc(
c.processes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "processes"),
"OperatingSystem.NumberOfProcesses",
nil,
nil,
)
c.ProcessesLimit = prometheus.NewDesc(
c.processesLimit = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "processes_limit"),
"OperatingSystem.MaxNumberOfProcesses",
nil,
nil,
)
c.ProcessMemoryLimitBytes = prometheus.NewDesc(
c.processMemoryLimitBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_limit_bytes"),
"OperatingSystem.MaxProcessMemorySize",
nil,
nil,
)
c.Users = prometheus.NewDesc(
c.users = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "users"),
"OperatingSystem.NumberOfUsers",
nil,
nil,
)
c.VirtualMemoryBytes = prometheus.NewDesc(
c.virtualMemoryBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "virtual_memory_bytes"),
"OperatingSystem.TotalVirtualMemorySize",
nil,
nil,
)
c.VisibleMemoryBytes = prometheus.NewDesc(
c.visibleMemoryBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "visible_memory_bytes"),
"OperatingSystem.TotalVisibleMemorySize",
nil,
nil,
)
c.VirtualMemoryFreeBytes = prometheus.NewDesc(
c.virtualMemoryFreeBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "virtual_memory_free_bytes"),
"OperatingSystem.FreeVirtualMemory",
nil,
@@ -162,7 +174,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting os metrics", "err", err)
return err
@@ -171,7 +183,7 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
}
// Win32_OperatingSystem docs:
// - https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
// - https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class.
type Win32_OperatingSystem struct {
Caption string
FreePhysicalMemory uint64
@@ -188,7 +200,7 @@ type Win32_OperatingSystem struct {
Version string
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
nwgi, err := netapi32.GetWorkstationInfo()
if err != nil {
return err
@@ -211,11 +223,12 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
// Get total allocation of paging files across all disks.
memManKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Control\Session Manager\Memory Management`, registry.QUERY_VALUE)
defer memManKey.Close()
if err != nil {
return err
}
defer memManKey.Close()
pagingFiles, _, pagingErr := memManKey.GetStringsValue("ExistingPageFiles")
var fsipf float64
@@ -232,12 +245,12 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
// Get build number and product name from registry
ntKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
defer ntKey.Close()
if err != nil {
return err
}
defer ntKey.Close()
pn, _, err := ntKey.GetStringValue("ProductName")
if err != nil {
return err
@@ -278,7 +291,7 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
pfb := fsipf - (pfbRaw * float64(gpi.PageSize))
ch <- prometheus.MustNewConstMetric(
c.OSInformation,
c.osInformation,
prometheus.GaugeValue,
1.0,
"Microsoft "+pn, // Caption
@@ -290,19 +303,19 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
)
ch <- prometheus.MustNewConstMetric(
c.PhysicalMemoryFreeBytes,
c.physicalMemoryFreeBytes,
prometheus.GaugeValue,
float64(gmse.AvailPhys),
)
ch <- prometheus.MustNewConstMetric(
c.Time,
c.time,
prometheus.GaugeValue,
float64(currentTime.Unix()),
)
ch <- prometheus.MustNewConstMetric(
c.Timezone,
c.timezone,
prometheus.GaugeValue,
1.0,
timezoneName,
@@ -310,13 +323,13 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
if pagingErr == nil {
ch <- prometheus.MustNewConstMetric(
c.PagingFreeBytes,
c.pagingFreeBytes,
prometheus.GaugeValue,
pfb,
)
ch <- prometheus.MustNewConstMetric(
c.PagingLimitBytes,
c.pagingLimitBytes,
prometheus.GaugeValue,
fsipf,
)
@@ -324,7 +337,7 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
_ = level.Debug(c.logger).Log("msg", "Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
}
ch <- prometheus.MustNewConstMetric(
c.VirtualMemoryFreeBytes,
c.virtualMemoryFreeBytes,
prometheus.GaugeValue,
float64(gmse.AvailPageFile),
)
@@ -333,37 +346,37 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
// https://techcommunity.microsoft.com/t5/windows-blog-archive/pushing-the-limits-of-windows-processes-and-threads/ba-p/723824
// https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-operatingsystem
ch <- prometheus.MustNewConstMetric(
c.ProcessesLimit,
c.processesLimit,
prometheus.GaugeValue,
float64(4294967295),
)
ch <- prometheus.MustNewConstMetric(
c.ProcessMemoryLimitBytes,
c.processMemoryLimitBytes,
prometheus.GaugeValue,
float64(gmse.TotalVirtual),
)
ch <- prometheus.MustNewConstMetric(
c.Processes,
c.processes,
prometheus.GaugeValue,
float64(gpi.ProcessCount),
)
ch <- prometheus.MustNewConstMetric(
c.Users,
c.users,
prometheus.GaugeValue,
float64(nwgi.LoggedOnUsers),
)
ch <- prometheus.MustNewConstMetric(
c.VirtualMemoryBytes,
c.virtualMemoryBytes,
prometheus.GaugeValue,
float64(gmse.TotalPageFile),
)
ch <- prometheus.MustNewConstMetric(
c.VisibleMemoryBytes,
c.visibleMemoryBytes,
prometheus.GaugeValue,
float64(gmse.TotalPhys),
)

View File

@@ -15,197 +15,202 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "physical_disk"
FlagPhysicalDiskExclude = "collector.physical_disk.disk-exclude"
FlagPhysicalDiskInclude = "collector.physical_disk.disk-include"
)
const Name = "physical_disk"
type Config struct {
DiskInclude string `yaml:"disk_include"`
DiskExclude string `yaml:"disk_exclude"`
DiskInclude *regexp.Regexp `yaml:"disk_include"`
DiskExclude *regexp.Regexp `yaml:"disk_exclude"`
}
var ConfigDefaults = Config{
DiskInclude: ".+",
DiskExclude: "",
DiskInclude: types.RegExpAny,
DiskExclude: types.RegExpEmpty,
}
// A collector is a Prometheus collector for perflib PhysicalDisk metrics
type collector struct {
// A Collector is a Prometheus Collector for perflib PhysicalDisk metrics.
type Collector struct {
config Config
logger log.Logger
diskInclude *string
diskExclude *string
diskIncludeSet bool
diskExcludeSet bool
RequestsQueued *prometheus.Desc
ReadBytesTotal *prometheus.Desc
ReadsTotal *prometheus.Desc
WriteBytesTotal *prometheus.Desc
WritesTotal *prometheus.Desc
ReadTime *prometheus.Desc
WriteTime *prometheus.Desc
IdleTime *prometheus.Desc
SplitIOs *prometheus.Desc
ReadLatency *prometheus.Desc
WriteLatency *prometheus.Desc
ReadWriteLatency *prometheus.Desc
diskIncludePattern *regexp.Regexp
diskExcludePattern *regexp.Regexp
idleTime *prometheus.Desc
readBytesTotal *prometheus.Desc
readLatency *prometheus.Desc
readTime *prometheus.Desc
readWriteLatency *prometheus.Desc
readsTotal *prometheus.Desc
requestsQueued *prometheus.Desc
splitIOs *prometheus.Desc
writeBytesTotal *prometheus.Desc
writeLatency *prometheus.Desc
writeTime *prometheus.Desc
writesTotal *prometheus.Desc
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
diskExclude: &config.DiskExclude,
diskInclude: &config.DiskInclude,
if config.DiskExclude == nil {
config.DiskExclude = ConfigDefaults.DiskExclude
}
if config.DiskInclude == nil {
config.DiskInclude = ConfigDefaults.DiskInclude
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
c := &collector{}
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
c.diskInclude = app.Flag(
FlagPhysicalDiskInclude,
"Regexp of disks to include. Disk number must both match include and not match exclude to be included.",
).Default(ConfigDefaults.DiskInclude).PreAction(func(_ *kingpin.ParseContext) error {
c.diskIncludeSet = true
return nil
}).String()
var diskExclude, diskInclude string
c.diskExclude = app.Flag(
FlagPhysicalDiskExclude,
app.Flag(
"collector.physical_disk.disk-exclude",
"Regexp of disks to exclude. Disk number must both match include and not match exclude to be included.",
).Default(ConfigDefaults.DiskExclude).PreAction(func(_ *kingpin.ParseContext) error {
c.diskExcludeSet = true
).Default(c.config.DiskExclude.String()).StringVar(&diskExclude)
app.Flag(
"collector.physical_disk.disk-include",
"Regexp of disks to include. Disk number must both match include and not match exclude to be included.",
).Default(c.config.DiskInclude.String()).StringVar(&diskInclude)
app.Action(func(*kingpin.ParseContext) error {
var err error
c.config.DiskExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", diskExclude))
if err != nil {
return fmt.Errorf("collector.physical_disk.disk-exclude: %w", err)
}
c.config.DiskInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", diskInclude))
if err != nil {
return fmt.Errorf("collector.physical_disk.disk-include: %w", err)
}
return nil
}).String()
})
return c
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"PhysicalDisk"}, nil
}
func (c *collector) Build() error {
c.RequestsQueued = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.requestsQueued = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
"The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength)",
[]string{"disk"},
nil,
)
c.ReadBytesTotal = prometheus.NewDesc(
c.readBytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_bytes_total"),
"The number of bytes transferred from the disk during read operations (PhysicalDisk.DiskReadBytesPerSec)",
[]string{"disk"},
nil,
)
c.ReadsTotal = prometheus.NewDesc(
c.readsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "reads_total"),
"The number of read operations on the disk (PhysicalDisk.DiskReadsPerSec)",
[]string{"disk"},
nil,
)
c.WriteBytesTotal = prometheus.NewDesc(
c.writeBytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_bytes_total"),
"The number of bytes transferred to the disk during write operations (PhysicalDisk.DiskWriteBytesPerSec)",
[]string{"disk"},
nil,
)
c.WritesTotal = prometheus.NewDesc(
c.writesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "writes_total"),
"The number of write operations on the disk (PhysicalDisk.DiskWritesPerSec)",
[]string{"disk"},
nil,
)
c.ReadTime = prometheus.NewDesc(
c.readTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_seconds_total"),
"Seconds that the disk was busy servicing read requests (PhysicalDisk.PercentDiskReadTime)",
[]string{"disk"},
nil,
)
c.WriteTime = prometheus.NewDesc(
c.writeTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_seconds_total"),
"Seconds that the disk was busy servicing write requests (PhysicalDisk.PercentDiskWriteTime)",
[]string{"disk"},
nil,
)
c.IdleTime = prometheus.NewDesc(
c.idleTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "idle_seconds_total"),
"Seconds that the disk was idle (PhysicalDisk.PercentIdleTime)",
[]string{"disk"},
nil,
)
c.SplitIOs = prometheus.NewDesc(
c.splitIOs = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "split_ios_total"),
"The number of I/Os to the disk were split into multiple I/Os (PhysicalDisk.SplitIOPerSec)",
[]string{"disk"},
nil,
)
c.ReadLatency = prometheus.NewDesc(
c.readLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_latency_seconds_total"),
"Shows the average time, in seconds, of a read operation from the disk (PhysicalDisk.AvgDiskSecPerRead)",
[]string{"disk"},
nil,
)
c.WriteLatency = prometheus.NewDesc(
c.writeLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "write_latency_seconds_total"),
"Shows the average time, in seconds, of a write operation to the disk (PhysicalDisk.AvgDiskSecPerWrite)",
[]string{"disk"},
nil,
)
c.ReadWriteLatency = prometheus.NewDesc(
c.readWriteLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "read_write_latency_seconds_total"),
"Shows the time, in seconds, of the average disk transfer (PhysicalDisk.AvgDiskSecPerTransfer)",
[]string{"disk"},
nil,
)
var err error
c.diskIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.diskInclude))
if err != nil {
return err
}
c.diskExcludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.diskExclude))
if err != nil {
return err
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting physical_disk metrics", "err", err)
return err
@@ -215,7 +220,7 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
// PhysicalDisk
// Win32_PerfRawData_PerfDisk_PhysicalDisk docs:
// - https://docs.microsoft.com/en-us/previous-versions/aa394308(v=vs.85) - Win32_PerfRawData_PerfDisk_PhysicalDisk class
// - https://docs.microsoft.com/en-us/previous-versions/aa394308(v=vs.85) - Win32_PerfRawData_PerfDisk_PhysicalDisk class.
type PhysicalDisk struct {
Name string
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
@@ -232,7 +237,7 @@ type PhysicalDisk struct {
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PhysicalDisk
if err := perflib.UnmarshalObject(ctx.PerfObjects["PhysicalDisk"], &dst, c.logger); err != nil {
return err
@@ -240,8 +245,8 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
for _, disk := range dst {
if disk.Name == "_Total" ||
c.diskExcludePattern.MatchString(disk.Name) ||
!c.diskIncludePattern.MatchString(disk.Name) {
c.config.DiskExclude.MatchString(disk.Name) ||
!c.config.DiskInclude.MatchString(disk.Name) {
continue
}
@@ -250,84 +255,84 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
disk_number, _, _ := strings.Cut(disk.Name, " ")
ch <- prometheus.MustNewConstMetric(
c.RequestsQueued,
c.requestsQueued,
prometheus.GaugeValue,
disk.CurrentDiskQueueLength,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.ReadBytesTotal,
c.readBytesTotal,
prometheus.CounterValue,
disk.DiskReadBytesPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.ReadsTotal,
c.readsTotal,
prometheus.CounterValue,
disk.DiskReadsPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.WriteBytesTotal,
c.writeBytesTotal,
prometheus.CounterValue,
disk.DiskWriteBytesPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.WritesTotal,
c.writesTotal,
prometheus.CounterValue,
disk.DiskWritesPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.ReadTime,
c.readTime,
prometheus.CounterValue,
disk.PercentDiskReadTime,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.WriteTime,
c.writeTime,
prometheus.CounterValue,
disk.PercentDiskWriteTime,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.IdleTime,
c.idleTime,
prometheus.CounterValue,
disk.PercentIdleTime,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.SplitIOs,
c.splitIOs,
prometheus.CounterValue,
disk.SplitIOPerSec,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.ReadLatency,
c.readLatency,
prometheus.CounterValue,
disk.AvgDiskSecPerRead*perflib.TicksToSecondScaleFactor,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.WriteLatency,
c.writeLatency,
prometheus.CounterValue,
disk.AvgDiskSecPerWrite*perflib.TicksToSecondScaleFactor,
disk_number,
)
ch <- prometheus.MustNewConstMetric(
c.ReadWriteLatency,
c.readWriteLatency,
prometheus.CounterValue,
disk.AvgDiskSecPerTransfer*perflib.TicksToSecondScaleFactor,
disk_number,

View File

@@ -0,0 +1,12 @@
package physical_disk_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/pkg/collector/physical_disk"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, physical_disk.Name, physical_disk.NewWithFlags)
}

View File

@@ -10,18 +10,12 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "printer"
FlagPrinterInclude = "collector.printer.include"
FlagPrinterExclude = "collector.printer.exclude"
)
const Name = "printer"
// printerStatusMap source: https://learn.microsoft.com/en-us/windows/win32/cimwin32prov/win32-printer#:~:text=Power%20Save-,PrinterStatus,Offline%20(7),-PrintJobDataType
var printerStatusMap = map[uint16]string{
@@ -35,60 +29,91 @@ var printerStatusMap = map[uint16]string{
}
type Config struct {
Include string `yaml:"printer_include"`
Exclude string `yaml:"printer_exclude"`
PrinterInclude *regexp.Regexp `yaml:"printer_include"`
PrinterExclude *regexp.Regexp `yaml:"printer_exclude"`
}
var ConfigDefaults = Config{
Include: ".+",
Exclude: "",
PrinterInclude: types.RegExpAny,
PrinterExclude: types.RegExpEmpty,
}
type collector struct {
type Collector struct {
config Config
logger log.Logger
printerInclude *string
printerExclude *string
printerStatus *prometheus.Desc
printerJobStatus *prometheus.Desc
printerJobCount *prometheus.Desc
printerIncludePattern *regexp.Regexp
printerExcludePattern *regexp.Regexp
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
printerInclude: &config.Include,
printerExclude: &config.Exclude,
if config.PrinterExclude == nil {
config.PrinterExclude = ConfigDefaults.PrinterExclude
}
if config.PrinterInclude == nil {
config.PrinterInclude = ConfigDefaults.PrinterInclude
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
c := &collector{
printerInclude: app.Flag(
FlagPrinterInclude,
"Regular expression to match printers to collect metrics for",
).Default(ConfigDefaults.Include).String(),
printerExclude: app.Flag(
FlagPrinterExclude,
"Regular expression to match printers to exclude",
).Default(ConfigDefaults.Exclude).String(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
var printerInclude, printerExclude string
app.Flag(
"collector.printer.include",
"Regular expression to match printers to collect metrics for",
).Default(c.config.PrinterInclude.String()).StringVar(&printerInclude)
app.Flag(
"collector.printer.exclude",
"Regular expression to match printers to exclude",
).Default(c.config.PrinterExclude.String()).StringVar(&printerExclude)
app.Action(func(*kingpin.ParseContext) error {
var err error
c.config.PrinterInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", printerInclude))
if err != nil {
return fmt.Errorf("collector.printer.include: %w", err)
}
c.config.PrinterExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", printerExclude))
if err != nil {
return fmt.Errorf("collector.printer.exclude: %w", err)
}
return nil
})
return c
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.printerJobStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "job_status"),
"A counter of printer jobs by status",
@@ -108,53 +133,50 @@ func (c *collector) Build() error {
nil,
)
var err error
c.printerIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.printerInclude))
if err != nil {
return err
}
c.printerExcludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.printerExclude))
return err
return nil
}
func (c *collector) GetName() string { return Name }
func (c *Collector) GetName() string { return Name }
func (c *collector) GetPerfCounter() ([]string, error) { return []string{"Printer"}, nil }
func (c *Collector) GetPerfCounter() ([]string, error) { return []string{"Printer"}, nil }
type win32_Printer struct {
type wmiPrinter struct {
Name string
Default bool
PrinterStatus uint16
JobCountSinceLastReset uint32
}
type win32_PrintJob struct {
type wmiPrintJob struct {
Name string
Status string
}
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collectPrinterStatus(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed to collect printer status metrics", "err", err)
return err
}
if err := c.collectPrinterJobStatus(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed to collect printer job status metrics", "err", err)
return err
}
return nil
}
func (c *collector) collectPrinterStatus(ch chan<- prometheus.Metric) error {
var printers []win32_Printer
q := wmi.QueryAll(&printers, c.logger)
func (c *Collector) collectPrinterStatus(ch chan<- prometheus.Metric) error {
var printers []wmiPrinter
q := wmi.QueryAllForClass(&printers, "win32_Printer", c.logger)
if err := wmi.Query(q, &printers); err != nil {
return err
}
for _, printer := range printers {
if c.printerExcludePattern.MatchString(printer.Name) ||
!c.printerIncludePattern.MatchString(printer.Name) {
if c.config.PrinterExclude.MatchString(printer.Name) ||
!c.config.PrinterInclude.MatchString(printer.Name) {
continue
}
@@ -184,9 +206,10 @@ func (c *collector) collectPrinterStatus(ch chan<- prometheus.Metric) error {
return nil
}
func (c *collector) collectPrinterJobStatus(ch chan<- prometheus.Metric) error {
var printJobs []win32_PrintJob
q := wmi.QueryAll(&printJobs, c.logger)
func (c *Collector) collectPrinterJobStatus(ch chan<- prometheus.Metric) error {
var printJobs []wmiPrintJob
q := wmi.QueryAllForClass(&printJobs, "win32_PrintJob", c.logger)
if err := wmi.Query(q, &printJobs); err != nil {
return err
}
@@ -201,6 +224,7 @@ func (c *collector) collectPrinterJobStatus(ch chan<- prometheus.Metric) error {
group.status,
)
}
return nil
}
@@ -209,19 +233,22 @@ type PrintJobStatusGroup struct {
status string
}
func (c *collector) groupPrintJobs(printJobs []win32_PrintJob) map[PrintJobStatusGroup]int {
func (c *Collector) groupPrintJobs(printJobs []wmiPrintJob) map[PrintJobStatusGroup]int {
groupedPrintJobs := make(map[PrintJobStatusGroup]int)
for _, printJob := range printJobs {
printerName := strings.Split(printJob.Name, ",")[0]
if c.printerExcludePattern.MatchString(printerName) ||
!c.printerIncludePattern.MatchString(printerName) {
if c.config.PrinterExclude.MatchString(printerName) ||
!c.config.PrinterInclude.MatchString(printerName) {
continue
}
groupedPrintJobs[PrintJobStatusGroup{
printerName: printerName,
status: printJob.Status,
}]++
}
return groupedPrintJobs
}

View File

@@ -4,7 +4,6 @@ import (
"testing"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/collector/printer"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)
@@ -12,6 +11,6 @@ import (
func BenchmarkCollector(b *testing.B) {
// Whitelist is not set in testing context (kingpin flags not parsed), causing the collector to skip all printers.
printersInclude := ".+"
kingpin.CommandLine.GetArg(printer.FlagPrinterInclude).StringVar(&printersInclude)
kingpin.CommandLine.GetArg("collector.printer.include").StringVar(&printersInclude)
testutils.FuncBenchmarkCollector(b, "printer", printer.NewWithFlags)
}

View File

@@ -5,7 +5,6 @@ package process
import (
"errors"
"fmt"
"golang.org/x/sys/windows"
"regexp"
"strconv"
"strings"
@@ -16,210 +15,229 @@ import (
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
)
const (
Name = "process"
FlagProcessExclude = "collector.process.exclude"
FlagProcessInclude = "collector.process.include"
FlagEnableWorkerProcess = "collector.process.iis"
FlagEnableReportOwner = "collector.process.report-owner"
)
const Name = "process"
type Config struct {
ProcessInclude string `yaml:"process_include"`
ProcessExclude string `yaml:"process_exclude"`
EnableWorkerProcess bool `yaml:"enable_iis_worker_process"`
EnableReportOwner bool `yaml:"enable_report_owner"`
ProcessInclude *regexp.Regexp `yaml:"process_include"`
ProcessExclude *regexp.Regexp `yaml:"process_exclude"`
EnableWorkerProcess bool `yaml:"enable_iis_worker_process"` //nolint:tagliatelle
EnableReportOwner bool `yaml:"enable_report_owner"`
}
var ConfigDefaults = Config{
ProcessInclude: ".+",
ProcessExclude: "",
ProcessInclude: types.RegExpAny,
ProcessExclude: types.RegExpEmpty,
EnableWorkerProcess: false,
EnableReportOwner: false,
}
type collector struct {
type Collector struct {
config Config
logger log.Logger
processInclude *string
processExclude *string
enableWorkerProcess *bool
enableReportOwner *bool
StartTime *prometheus.Desc
CPUTimeTotal *prometheus.Desc
HandleCount *prometheus.Desc
IOBytesTotal *prometheus.Desc
IOOperationsTotal *prometheus.Desc
PageFaultsTotal *prometheus.Desc
PageFileBytes *prometheus.Desc
PoolBytes *prometheus.Desc
PriorityBase *prometheus.Desc
PrivateBytes *prometheus.Desc
ThreadCount *prometheus.Desc
VirtualBytes *prometheus.Desc
WorkingSetPrivate *prometheus.Desc
WorkingSetPeak *prometheus.Desc
WorkingSet *prometheus.Desc
processIncludePattern *regexp.Regexp
processExcludePattern *regexp.Regexp
lookupCache map[string]string
cpuTimeTotal *prometheus.Desc
handleCount *prometheus.Desc
ioBytesTotal *prometheus.Desc
ioOperationsTotal *prometheus.Desc
pageFaultsTotal *prometheus.Desc
pageFileBytes *prometheus.Desc
poolBytes *prometheus.Desc
priorityBase *prometheus.Desc
privateBytes *prometheus.Desc
startTime *prometheus.Desc
threadCount *prometheus.Desc
virtualBytes *prometheus.Desc
workingSet *prometheus.Desc
workingSetPeak *prometheus.Desc
workingSetPrivate *prometheus.Desc
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
processExclude: &config.ProcessExclude,
processInclude: &config.ProcessInclude,
enableWorkerProcess: &config.EnableWorkerProcess,
if config.ProcessExclude == nil {
config.ProcessExclude = ConfigDefaults.ProcessExclude
}
if config.ProcessInclude == nil {
config.ProcessInclude = ConfigDefaults.ProcessInclude
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
c := &collector{
processInclude: app.Flag(
FlagProcessInclude,
"Regexp of processes to include. Process name must both match include and not match exclude to be included.",
).Default(ConfigDefaults.ProcessInclude).String(),
processExclude: app.Flag(
FlagProcessExclude,
"Regexp of processes to exclude. Process name must both match include and not match exclude to be included.",
).Default(ConfigDefaults.ProcessExclude).String(),
enableWorkerProcess: app.Flag(
FlagEnableWorkerProcess,
"Enable IIS worker process name queries. May cause the collector to leak memory.",
).Default("false").Bool(),
enableReportOwner: app.Flag(
FlagEnableReportOwner,
"Enable reporting of process owner.",
).Default("false").Bool(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
var processExclude, processInclude string
app.Flag(
"collector.process.exclude",
"Regexp of processes to exclude. Process name must both match include and not match exclude to be included.",
).Default(c.config.ProcessExclude.String()).StringVar(&processExclude)
app.Flag(
"collector.process.include",
"Regexp of processes to include. Process name must both match include and not match exclude to be included.",
).Default(c.config.ProcessInclude.String()).StringVar(&processInclude)
app.Flag(
"collector.process.iis",
"Enable IIS worker process name queries. May cause the collector to leak memory.",
).Default(strconv.FormatBool(c.config.EnableWorkerProcess)).BoolVar(&c.config.EnableWorkerProcess)
app.Flag(
"collector.process.report-owner",
"Enable reporting of process owner.",
).Default(strconv.FormatBool(c.config.EnableReportOwner)).BoolVar(&c.config.EnableReportOwner)
app.Action(func(*kingpin.ParseContext) error {
var err error
c.config.ProcessExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", processExclude))
if err != nil {
return fmt.Errorf("collector.process.exclude: %w", err)
}
c.config.ProcessInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", processInclude))
if err != nil {
return fmt.Errorf("collector.process.include: %w", err)
}
return nil
})
return c
}
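// Usage sketch (hypothetical, assuming a throwaway kingpin application): with the
// regex compilation moved into the kingpin Action callback, the include/exclude
// patterns are only compiled once Parse runs, so an invalid pattern surfaces as a
// parse error instead of failing later in Build.
func exampleParseProcessFlags(args []string) (*Collector, error) {
	app := kingpin.New("windows_exporter_example", "hypothetical example application")
	c := NewWithFlags(app)
	// c.config still holds the uncompiled defaults until Parse triggers the Action.
	if _, err := app.Parse(args); err != nil {
		return nil, err
	}
	return c, nil
}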
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Process"}, nil
}
func (c *collector) Build() error {
if c.processInclude != nil && *c.processInclude == ".*" && utils.IsEmpty(c.processExclude) {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
if c.config.ProcessInclude.String() == "^(?:.*)$" && c.config.ProcessExclude.String() == "^(?:)$" {
_ = level.Warn(c.logger).Log("msg", "No filters specified for process collector. This will generate a very large number of metrics!")
}
commonLabels := make([]string, 0)
if *c.enableReportOwner {
if c.config.EnableReportOwner {
commonLabels = []string{"owner"}
}
c.StartTime = prometheus.NewDesc(
c.startTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "start_time"),
"Time of process start.",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.CPUTimeTotal = prometheus.NewDesc(
c.cpuTimeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_time_total"),
"Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user).",
append(commonLabels, "process", "process_id", "creating_process_id", "mode"),
nil,
)
c.HandleCount = prometheus.NewDesc(
c.handleCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "handles"),
"Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.IOBytesTotal = prometheus.NewDesc(
c.ioBytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "io_bytes_total"),
"Bytes issued to I/O operations in different modes (read, write, other).",
append(commonLabels, "process", "process_id", "creating_process_id", "mode"),
nil,
)
c.IOOperationsTotal = prometheus.NewDesc(
c.ioOperationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "io_operations_total"),
"I/O operations issued in different modes (read, write, other).",
append(commonLabels, "process", "process_id", "creating_process_id", "mode"),
nil,
)
c.PageFaultsTotal = prometheus.NewDesc(
c.pageFaultsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_faults_total"),
"Page faults by the threads executing in this process.",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.PageFileBytes = prometheus.NewDesc(
c.pageFileBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_file_bytes"),
"Current number of bytes this process has used in the paging file(s).",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.PoolBytes = prometheus.NewDesc(
c.poolBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_bytes"),
"Pool Bytes is the last observed number of bytes in the paged or nonpaged pool.",
append(commonLabels, "process", "process_id", "creating_process_id", "pool"),
nil,
)
c.PriorityBase = prometheus.NewDesc(
c.priorityBase = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "priority_base"),
"Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process.",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.PrivateBytes = prometheus.NewDesc(
c.privateBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "private_bytes"),
"Current number of bytes this process has allocated that cannot be shared with other processes.",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.ThreadCount = prometheus.NewDesc(
c.threadCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "threads"),
"Number of threads currently active in this process.",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.VirtualBytes = prometheus.NewDesc(
c.virtualBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "virtual_bytes"),
"Current size, in bytes, of the virtual address space that the process is using.",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.WorkingSetPrivate = prometheus.NewDesc(
c.workingSetPrivate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_private_bytes"),
"Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes.",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.WorkingSetPeak = prometheus.NewDesc(
c.workingSetPeak = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_peak_bytes"),
"Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process.",
append(commonLabels, "process", "process_id", "creating_process_id"),
nil,
)
c.WorkingSet = prometheus.NewDesc(
c.workingSet = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_bytes"),
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process.",
append(commonLabels, "process", "process_id", "creating_process_id"),
@@ -228,18 +246,6 @@ func (c *collector) Build() error {
c.lookupCache = make(map[string]string)
var err error
c.processIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.processInclude))
if err != nil {
return err
}
c.processExcludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.processExclude))
if err != nil {
return err
}
return nil
}
@@ -263,7 +269,7 @@ type perflibProcess struct {
PageFaultsPerSec float64 `perflib:"Page Faults/sec"`
PageFileBytesPeak float64 `perflib:"Page File Bytes Peak"`
PageFileBytes float64 `perflib:"Page File Bytes"`
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
PoolNonPagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
PriorityBase float64 `perflib:"Priority Base"`
PrivateBytes float64 `perflib:"Private Bytes"`
@@ -280,17 +286,17 @@ type WorkerProcess struct {
ProcessId uint64
}
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
data := make([]perflibProcess, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Process"], &data, c.logger)
if err != nil {
return err
}
var dst_wp []WorkerProcess
if *c.enableWorkerProcess {
q_wp := wmi.QueryAll(&dst_wp, c.logger)
if err := wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration"); err != nil {
var workerProcesses []WorkerProcess
if c.config.EnableWorkerProcess {
queryWorkerProcess := wmi.QueryAllForClass(&workerProcesses, "WorkerProcess", c.logger)
if err := wmi.QueryNamespace(queryWorkerProcess, &workerProcesses, "root\\WebAdministration"); err != nil {
_ = level.Debug(c.logger).Log("msg", "Could not query WebAdministration namespace for IIS worker processes", "err", err)
}
}
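// Descriptive note: QueryAllForClass names the WMI class explicitly ("WorkerProcess")
// instead of deriving it from the Go type, so the generated WQL is presumably along
// the lines of "SELECT AppPoolName, ProcessId FROM WorkerProcess", executed against
// the root\WebAdministration namespace (an assumption about the helper, not verified here).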
@@ -299,17 +305,18 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
for _, process := range data {
if process.Name == "_Total" ||
c.processExcludePattern.MatchString(process.Name) ||
!c.processIncludePattern.MatchString(process.Name) {
c.config.ProcessExclude.MatchString(process.Name) ||
!c.config.ProcessInclude.MatchString(process.Name) {
continue
}
// Duplicate processes are suffixed # and an index number. Remove those.
processName := strings.Split(process.Name, "#")[0]
pid := strconv.FormatUint(uint64(process.IDProcess), 10)
cpid := strconv.FormatUint(uint64(process.CreatingProcessID), 10)
if *c.enableWorkerProcess {
for _, wp := range dst_wp {
// Duplicate processes are suffixed with "#" and an index number. Remove those.
processName, _, _ := strings.Cut(process.Name, "#")
pid := strconv.FormatUint(uint64(process.IDProcess), 10)
parentPID := strconv.FormatUint(uint64(process.CreatingProcessID), 10)
if c.config.EnableWorkerProcess {
for _, wp := range workerProcesses {
if wp.ProcessId == uint64(process.IDProcess) {
processName = strings.Join([]string{processName, wp.AppPoolName}, "_")
break
@@ -319,7 +326,7 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
labels := make([]string, 0, 4)
if *c.enableReportOwner {
if c.config.EnableReportOwner {
owner, err = c.getProcessOwner(int(process.IDProcess))
if err != nil {
owner = "unknown"
@@ -328,150 +335,150 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
labels = []string{owner}
}
labels = append(labels, processName, pid, cpid)
labels = append(labels, processName, pid, parentPID)
ch <- prometheus.MustNewConstMetric(
c.StartTime,
c.startTime,
prometheus.GaugeValue,
process.ElapsedTime,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.HandleCount,
c.handleCount,
prometheus.GaugeValue,
process.HandleCount,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.CPUTimeTotal,
c.cpuTimeTotal,
prometheus.CounterValue,
process.PercentPrivilegedTime,
append(labels, "privileged")...,
)
ch <- prometheus.MustNewConstMetric(
c.CPUTimeTotal,
c.cpuTimeTotal,
prometheus.CounterValue,
process.PercentUserTime,
append(labels, "user")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOBytesTotal,
c.ioBytesTotal,
prometheus.CounterValue,
process.IOOtherBytesPerSec,
append(labels, "other")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOOperationsTotal,
c.ioOperationsTotal,
prometheus.CounterValue,
process.IOOtherOperationsPerSec,
append(labels, "other")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOBytesTotal,
c.ioBytesTotal,
prometheus.CounterValue,
process.IOReadBytesPerSec,
append(labels, "read")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOOperationsTotal,
c.ioOperationsTotal,
prometheus.CounterValue,
process.IOReadOperationsPerSec,
append(labels, "read")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOBytesTotal,
c.ioBytesTotal,
prometheus.CounterValue,
process.IOWriteBytesPerSec,
append(labels, "write")...,
)
ch <- prometheus.MustNewConstMetric(
c.IOOperationsTotal,
c.ioOperationsTotal,
prometheus.CounterValue,
process.IOWriteOperationsPerSec,
append(labels, "write")...,
)
ch <- prometheus.MustNewConstMetric(
c.PageFaultsTotal,
c.pageFaultsTotal,
prometheus.CounterValue,
process.PageFaultsPerSec,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.PageFileBytes,
c.pageFileBytes,
prometheus.GaugeValue,
process.PageFileBytes,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.PoolBytes,
c.poolBytes,
prometheus.GaugeValue,
process.PoolNonpagedBytes,
process.PoolNonPagedBytes,
append(labels, "nonpaged")...,
)
ch <- prometheus.MustNewConstMetric(
c.PoolBytes,
c.poolBytes,
prometheus.GaugeValue,
process.PoolPagedBytes,
append(labels, "paged")...,
)
ch <- prometheus.MustNewConstMetric(
c.PriorityBase,
c.priorityBase,
prometheus.GaugeValue,
process.PriorityBase,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.PrivateBytes,
c.privateBytes,
prometheus.GaugeValue,
process.PrivateBytes,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.ThreadCount,
c.threadCount,
prometheus.GaugeValue,
process.ThreadCount,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.VirtualBytes,
c.virtualBytes,
prometheus.GaugeValue,
process.VirtualBytes,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSetPrivate,
c.workingSetPrivate,
prometheus.GaugeValue,
process.WorkingSetPrivate,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSetPeak,
c.workingSetPeak,
prometheus.GaugeValue,
process.WorkingSetPeak,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSet,
c.workingSet,
prometheus.GaugeValue,
process.WorkingSet,
labels...,
@@ -482,14 +489,14 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
}
// ref: https://github.com/microsoft/hcsshim/blob/8beabacfc2d21767a07c20f8dd5f9f3932dbf305/internal/uvm/stats.go#L25
func (c *collector) getProcessOwner(pid int) (string, error) {
func (c *Collector) getProcessOwner(pid int) (string, error) {
p, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid))
if errors.Is(err, syscall.Errno(0x57)) { // invalid parameter, for PIDs that don't exist
return "", errors.New("process not found")
}
if err != nil {
return "", fmt.Errorf("OpenProcess: %T %w", err, err)
return "", fmt.Errorf("OpenProcess: %w", err)
}
defer windows.Close(p)
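// Sketch of how the owner lookup typically continues after OpenProcess (an
// assumption about the common pattern, not the repository's actual code): the
// process token is queried and the user SID is resolved to DOMAIN\account.
func exampleOwnerFromHandle(p windows.Handle) (string, error) {
	var token windows.Token
	if err := windows.OpenProcessToken(p, windows.TOKEN_QUERY, &token); err != nil {
		return "", fmt.Errorf("OpenProcessToken: %w", err)
	}
	defer token.Close()

	tokenUser, err := token.GetTokenUser()
	if err != nil {
		return "", fmt.Errorf("GetTokenUser: %w", err)
	}

	account, domain, _, err := tokenUser.User.Sid.LookupAccount("")
	if err != nil {
		return "", fmt.Errorf("LookupAccount: %w", err)
	}

	return domain + `\` + account, nil
}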

View File

@@ -9,9 +9,9 @@ import (
)
func BenchmarkProcessCollector(b *testing.B) {
// Include is not set in testing context (kingpin flags not parsed), causing the collector to skip all processes.
// ProcessInclude is not set in testing context (kingpin flags not parsed), causing the collector to skip all processes.
localProcessInclude := ".+"
kingpin.CommandLine.GetArg(process.FlagProcessInclude).StringVar(&localProcessInclude)
kingpin.CommandLine.GetArg("collector.process.include").StringVar(&localProcessInclude)
// No context name required as collector source is WMI
testutils.FuncBenchmarkCollector(b, process.Name, process.NewWithFlags)
}

View File

@@ -87,7 +87,7 @@ func (coll *Prometheus) Collect(ch chan<- prometheus.Metric) {
time.Since(t).Seconds(),
)
if err != nil {
ch <- prometheus.NewInvalidMetric(coll.scrapeSuccessDesc, fmt.Errorf("failed to prepare scrape: %v", err))
ch <- prometheus.NewInvalidMetric(coll.scrapeSuccessDesc, fmt.Errorf("failed to prepare scrape: %w", err))
return
}
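// Generic Go illustration of why %w matters here: wrapping keeps the original
// error in the chain, so callers can still match it with errors.Is / errors.As
// (assumes the errors package is imported alongside fmt).
func exampleWrap() bool {
	base := errors.New("perflib query failed") // hypothetical underlying error
	wrapped := fmt.Errorf("failed to prepare scrape: %w", base)

	return errors.Is(wrapped, base) // true; with %v this would report false
}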
@@ -112,7 +112,7 @@ func (coll *Prometheus) Collect(ch chan<- prometheus.Metric) {
}()
for name, c := range coll.collectors.collectors {
go func(name string, c types.Collector) {
go func(name string, c Collector) {
defer wg.Done()
outcome := coll.execute(name, c, scrapeContext, metricsBuffer)
l.Lock()
@@ -171,7 +171,7 @@ func (coll *Prometheus) Collect(ch chan<- prometheus.Metric) {
l.Unlock()
}
func (coll *Prometheus) execute(name string, c types.Collector, ctx *types.ScrapeContext, ch chan<- prometheus.Metric) collectorOutcome {
func (coll *Prometheus) execute(name string, c Collector, ctx *types.ScrapeContext, ch chan<- prometheus.Metric) collectorOutcome {
t := time.Now()
err := c.Collect(ctx, ch)
duration := time.Since(t).Seconds()

View File

@@ -20,136 +20,149 @@ type Config struct{}
var ConfigDefaults = Config{}
// collector
// A RemoteFxNetworkCollector is a Prometheus collector for
// Collector
// A RemoteFxNetworkCollector is a Prometheus Collector for
// WMI Win32_PerfRawData_Counters_RemoteFXNetwork & Win32_PerfRawData_Counters_RemoteFXGraphics metrics
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxnetwork/
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxgraphics/
type collector struct {
type Collector struct {
config Config
logger log.Logger
// net
BaseTCPRTT *prometheus.Desc
BaseUDPRTT *prometheus.Desc
CurrentTCPBandwidth *prometheus.Desc
CurrentTCPRTT *prometheus.Desc
CurrentUDPBandwidth *prometheus.Desc
CurrentUDPRTT *prometheus.Desc
TotalReceivedBytes *prometheus.Desc
TotalSentBytes *prometheus.Desc
UDPPacketsReceivedPersec *prometheus.Desc
UDPPacketsSentPersec *prometheus.Desc
FECRate *prometheus.Desc
LossRate *prometheus.Desc
RetransmissionRate *prometheus.Desc
baseTCPRTT *prometheus.Desc
baseUDPRTT *prometheus.Desc
currentTCPBandwidth *prometheus.Desc
currentTCPRTT *prometheus.Desc
currentUDPBandwidth *prometheus.Desc
currentUDPRTT *prometheus.Desc
fecRate *prometheus.Desc
lossRate *prometheus.Desc
retransmissionRate *prometheus.Desc
totalReceivedBytes *prometheus.Desc
totalSentBytes *prometheus.Desc
udpPacketsReceivedPerSec *prometheus.Desc
udpPacketsSentPerSec *prometheus.Desc
// gfx
AverageEncodingTime *prometheus.Desc
FrameQuality *prometheus.Desc
FramesSkippedPerSecondInsufficientResources *prometheus.Desc
GraphicsCompressionratio *prometheus.Desc
InputFramesPerSecond *prometheus.Desc
OutputFramesPerSecond *prometheus.Desc
SourceFramesPerSecond *prometheus.Desc
averageEncodingTime *prometheus.Desc
frameQuality *prometheus.Desc
framesSkippedPerSecondInsufficientResources *prometheus.Desc
graphicsCompressionRatio *prometheus.Desc
inputFramesPerSecond *prometheus.Desc
outputFramesPerSecond *prometheus.Desc
sourceFramesPerSecond *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"RemoteFX Network", "RemoteFX Graphics"}, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
// net
c.BaseTCPRTT = prometheus.NewDesc(
c.baseTCPRTT = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"),
"Base TCP round-trip time (RTT) detected in seconds",
[]string{"session_name"},
nil,
)
c.BaseUDPRTT = prometheus.NewDesc(
c.baseUDPRTT = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_base_udp_rtt_seconds"),
"Base UDP round-trip time (RTT) detected in seconds.",
[]string{"session_name"},
nil,
)
c.CurrentTCPBandwidth = prometheus.NewDesc(
c.currentTCPBandwidth = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_current_tcp_bandwidth"),
"TCP Bandwidth detected in bytes per second.",
[]string{"session_name"},
nil,
)
c.CurrentTCPRTT = prometheus.NewDesc(
c.currentTCPRTT = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_current_tcp_rtt_seconds"),
"Average TCP round-trip time (RTT) detected in seconds.",
[]string{"session_name"},
nil,
)
c.CurrentUDPBandwidth = prometheus.NewDesc(
c.currentUDPBandwidth = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_current_udp_bandwidth"),
"UDP Bandwidth detected in bytes per second.",
[]string{"session_name"},
nil,
)
c.CurrentUDPRTT = prometheus.NewDesc(
c.currentUDPRTT = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_current_udp_rtt_seconds"),
"Average UDP round-trip time (RTT) detected in seconds.",
[]string{"session_name"},
nil,
)
c.TotalReceivedBytes = prometheus.NewDesc(
c.totalReceivedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_received_bytes_total"),
"(TotalReceivedBytes)",
[]string{"session_name"},
nil,
)
c.TotalSentBytes = prometheus.NewDesc(
c.totalSentBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_sent_bytes_total"),
"(TotalSentBytes)",
[]string{"session_name"},
nil,
)
c.UDPPacketsReceivedPersec = prometheus.NewDesc(
c.udpPacketsReceivedPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_udp_packets_received_total"),
"Rate in packets per second at which packets are received over UDP.",
[]string{"session_name"},
nil,
)
c.UDPPacketsSentPersec = prometheus.NewDesc(
c.udpPacketsSentPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_udp_packets_sent_total"),
"Rate in packets per second at which packets are sent over UDP.",
[]string{"session_name"},
nil,
)
c.FECRate = prometheus.NewDesc(
c.fecRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_fec_rate"),
"Forward Error Correction (FEC) percentage",
[]string{"session_name"},
nil,
)
c.LossRate = prometheus.NewDesc(
c.lossRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_loss_rate"),
"Loss percentage",
[]string{"session_name"},
nil,
)
c.RetransmissionRate = prometheus.NewDesc(
c.retransmissionRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_retransmission_rate"),
"Percentage of packets that have been retransmitted",
[]string{"session_name"},
@@ -157,43 +170,43 @@ func (c *collector) Build() error {
)
// gfx
c.AverageEncodingTime = prometheus.NewDesc(
c.averageEncodingTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "gfx_average_encoding_time_seconds"),
"Average frame encoding time in seconds",
[]string{"session_name"},
nil,
)
c.FrameQuality = prometheus.NewDesc(
c.frameQuality = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "gfx_frame_quality"),
"Quality of the output frame expressed as a percentage of the quality of the source frame.",
[]string{"session_name"},
nil,
)
c.FramesSkippedPerSecondInsufficientResources = prometheus.NewDesc(
c.framesSkippedPerSecondInsufficientResources = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "gfx_frames_skipped_insufficient_resource_total"),
"Number of frames skipped per second due to insufficient client resources.",
[]string{"session_name", "resource"},
nil,
)
c.GraphicsCompressionratio = prometheus.NewDesc(
c.graphicsCompressionRatio = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "gfx_graphics_compression_ratio"),
"Ratio of the number of bytes encoded to the number of bytes input.",
[]string{"session_name"},
nil,
)
c.InputFramesPerSecond = prometheus.NewDesc(
c.inputFramesPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "gfx_input_frames_total"),
"Number of sources frames provided as input to RemoteFX graphics per second.",
[]string{"session_name"},
nil,
)
c.OutputFramesPerSecond = prometheus.NewDesc(
c.outputFramesPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "gfx_output_frames_total"),
"Number of frames sent to the client per second.",
[]string{"session_name"},
nil,
)
c.SourceFramesPerSecond = prometheus.NewDesc(
c.sourceFramesPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "gfx_source_frames_total"),
"Number of frames composed by the source (DWM) per second.",
[]string{"session_name"},
@@ -204,7 +217,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collectRemoteFXNetworkCount(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session count metrics", "err", err)
return err
@@ -233,7 +246,7 @@ type perflibRemoteFxNetwork struct {
RetransmissionRate float64 `perflib:"Percentage of packets that have been retransmitted"`
}
func (c *collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibRemoteFxNetwork, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Network"], &dst, c.logger)
if err != nil {
@@ -247,81 +260,81 @@ func (c *collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, ch cha
continue
}
ch <- prometheus.MustNewConstMetric(
c.BaseTCPRTT,
c.baseTCPRTT,
prometheus.GaugeValue,
utils.MilliSecToSec(d.BaseTCPRTT),
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.BaseUDPRTT,
c.baseUDPRTT,
prometheus.GaugeValue,
utils.MilliSecToSec(d.BaseUDPRTT),
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.CurrentTCPBandwidth,
c.currentTCPBandwidth,
prometheus.GaugeValue,
(d.CurrentTCPBandwidth*1000)/8,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.CurrentTCPRTT,
c.currentTCPRTT,
prometheus.GaugeValue,
utils.MilliSecToSec(d.CurrentTCPRTT),
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.CurrentUDPBandwidth,
c.currentUDPBandwidth,
prometheus.GaugeValue,
(d.CurrentUDPBandwidth*1000)/8,
normalizeSessionName(d.Name),
)
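// Descriptive note on the two bandwidth conversions above: the arithmetic treats the
// perflib "Current TCP/UDP Bandwidth" counters as kilobits per second, so *1000/8
// yields the bytes-per-second value the metric descriptions promise (an assumption
// inferred from the existing conversion, not from counter documentation).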
ch <- prometheus.MustNewConstMetric(
c.CurrentUDPRTT,
c.currentUDPRTT,
prometheus.GaugeValue,
utils.MilliSecToSec(d.CurrentUDPRTT),
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.TotalReceivedBytes,
c.totalReceivedBytes,
prometheus.CounterValue,
d.TotalReceivedBytes,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.TotalSentBytes,
c.totalSentBytes,
prometheus.CounterValue,
d.TotalSentBytes,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.UDPPacketsReceivedPersec,
c.udpPacketsReceivedPerSec,
prometheus.CounterValue,
d.UDPPacketsReceivedPersec,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.UDPPacketsSentPersec,
c.udpPacketsSentPerSec,
prometheus.CounterValue,
d.UDPPacketsSentPersec,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.FECRate,
c.fecRate,
prometheus.GaugeValue,
d.FECRate,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.LossRate,
c.lossRate,
prometheus.GaugeValue,
d.LossRate,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.RetransmissionRate,
c.retransmissionRate,
prometheus.GaugeValue,
d.RetransmissionRate,
normalizeSessionName(d.Name),
@@ -343,7 +356,7 @@ type perflibRemoteFxGraphics struct {
SourceFramesPerSecond float64 `perflib:"Source Frames/Second"`
}
func (c *collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibRemoteFxGraphics, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Graphics"], &dst, c.logger)
if err != nil {
@@ -357,58 +370,58 @@ func (c *collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, ch
continue
}
ch <- prometheus.MustNewConstMetric(
c.AverageEncodingTime,
c.averageEncodingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.AverageEncodingTime),
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.FrameQuality,
c.frameQuality,
prometheus.GaugeValue,
d.FrameQuality,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
c.framesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientClientResources,
normalizeSessionName(d.Name),
"client",
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
c.framesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientNetworkResources,
normalizeSessionName(d.Name),
"network",
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
c.framesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientServerResources,
normalizeSessionName(d.Name),
"server",
)
ch <- prometheus.MustNewConstMetric(
c.GraphicsCompressionratio,
c.graphicsCompressionRatio,
prometheus.GaugeValue,
d.GraphicsCompressionratio,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.InputFramesPerSecond,
c.inputFramesPerSecond,
prometheus.CounterValue,
d.InputFramesPerSecond,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.OutputFramesPerSecond,
c.outputFramesPerSecond,
prometheus.CounterValue,
d.OutputFramesPerSecond,
normalizeSessionName(d.Name),
)
ch <- prometheus.MustNewConstMetric(
c.SourceFramesPerSecond,
c.sourceFramesPerSecond,
prometheus.CounterValue,
d.SourceFramesPerSecond,
normalizeSessionName(d.Name),
@@ -418,7 +431,7 @@ func (c *collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, ch
return nil
}
// normalizeSessionName ensure that the session is the same between WTS API and performance counters
// normalizeSessionName ensures that the session name is the same between the WTS API and performance counters.
func normalizeSessionName(sessionName string) string {
return strings.Replace(sessionName, "RDP-tcp", "RDP-Tcp", 1)
}

View File

@@ -9,45 +9,34 @@ import (
"runtime"
"strings"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/go-ole/go-ole"
"github.com/go-ole/go-ole/oleutil"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "scheduled_task"
FlagScheduledTaskExclude = "collector.scheduled_task.exclude"
FlagScheduledTaskInclude = "collector.scheduled_task.include"
)
const Name = "scheduled_task"
type Config struct {
TaskExclude string `yaml:"task_exclude"`
TaskInclude string `yaml:"task_include"`
TaskExclude *regexp.Regexp `yaml:"task_exclude"`
TaskInclude *regexp.Regexp `yaml:"task_include"`
}
var ConfigDefaults = Config{
TaskExclude: "",
TaskInclude: ".+",
TaskExclude: types.RegExpEmpty,
TaskInclude: types.RegExpAny,
}
type collector struct {
type Collector struct {
config Config
logger log.Logger
taskExclude *string
taskInclude *string
LastResult *prometheus.Desc
MissedRuns *prometheus.Desc
State *prometheus.Desc
taskIncludePattern *regexp.Regexp
taskExcludePattern *regexp.Regexp
lastResult *prometheus.Desc
missedRuns *prometheus.Desc
state *prometheus.Desc
}
// TaskState ...
@@ -62,7 +51,11 @@ const (
TASK_STATE_QUEUED
TASK_STATE_READY
TASK_STATE_RUNNING
TASK_RESULT_SUCCESS TaskResult = 0x0
)
const (
SCHED_S_SUCCESS TaskResult = 0x0
SCHED_S_TASK_HAS_NOT_RUN TaskResult = 0x00041303
)
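// Descriptive note: both values are Task Scheduler HRESULT success codes.
// SCHED_S_SUCCESS (0x0) means the last run completed successfully, while
// SCHED_S_TASK_HAS_NOT_RUN (0x00041303) is documented as "The task has not yet run",
// which is why the collector treats it specially below.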
type ScheduledTask struct {
@@ -76,85 +69,106 @@ type ScheduledTask struct {
type ScheduledTasks []ScheduledTask
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
taskExclude: &config.TaskExclude,
taskInclude: &config.TaskInclude,
if config.TaskExclude == nil {
config.TaskExclude = ConfigDefaults.TaskExclude
}
if config.TaskInclude == nil {
config.TaskInclude = ConfigDefaults.TaskInclude
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
c := &collector{
taskInclude: app.Flag(
FlagScheduledTaskInclude,
"Regexp of tasks to include. Task path must both match include and not match exclude to be included.",
).Default(ConfigDefaults.TaskInclude).String(),
taskExclude: app.Flag(
FlagScheduledTaskExclude,
"Regexp of tasks to exclude. Task path must both match include and not match exclude to be included.",
).Default(ConfigDefaults.TaskExclude).String(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
var taskExclude, taskInclude string
app.Flag(
"collector.scheduled_task.exclude",
"Regexp of tasks to exclude. Task path must both match include and not match exclude to be included.",
).Default(c.config.TaskExclude.String()).StringVar(&taskExclude)
app.Flag(
"collector.scheduled_task.include",
"Regexp of tasks to include. Task path must both match include and not match exclude to be included.",
).Default(c.config.TaskInclude.String()).StringVar(&taskInclude)
app.Action(func(*kingpin.ParseContext) error {
var err error
c.config.TaskExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", taskExclude))
if err != nil {
return fmt.Errorf("collector.scheduled_task.exclude: %w", err)
}
c.config.TaskInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", taskInclude))
if err != nil {
return fmt.Errorf("collector.scheduled_task.include: %w", err)
}
return nil
})
return c
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.LastResult = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.lastResult = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "last_result"),
"The result that was returned the last time the registered task was run",
[]string{"task"},
nil,
)
c.MissedRuns = prometheus.NewDesc(
c.missedRuns = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "missed_runs"),
"The number of times the registered task missed a scheduled run",
[]string{"task"},
nil,
)
c.State = prometheus.NewDesc(
c.state = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"The current state of a scheduled task",
[]string{"task", "state"},
nil,
)
var err error
c.taskIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.taskInclude))
if err != nil {
return err
}
c.taskExcludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.taskExclude))
if err != nil {
return err
}
return nil
}
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting user metrics", "err", err)
return err
@@ -165,37 +179,18 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
var TASK_STATES = []string{"disabled", "queued", "ready", "running", "unknown"}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
scheduledTasks, err := getScheduledTasks()
if err != nil {
return err
}
for _, task := range scheduledTasks {
if c.taskExcludePattern.MatchString(task.Path) ||
!c.taskIncludePattern.MatchString(task.Path) {
if c.config.TaskExclude.MatchString(task.Path) ||
!c.config.TaskInclude.MatchString(task.Path) {
continue
}
lastResult := 0.0
if task.LastTaskResult == TASK_RESULT_SUCCESS {
lastResult = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.LastResult,
prometheus.GaugeValue,
lastResult,
task.Path,
)
ch <- prometheus.MustNewConstMetric(
c.MissedRuns,
prometheus.GaugeValue,
task.MissedRunsCount,
task.Path,
)
for _, state := range TASK_STATES {
var stateValue float64
@@ -204,13 +199,36 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.State,
c.state,
prometheus.GaugeValue,
stateValue,
task.Path,
state,
)
}
if task.LastTaskResult == SCHED_S_TASK_HAS_NOT_RUN {
continue
}
lastResult := 0.0
if task.LastTaskResult == SCHED_S_SUCCESS {
lastResult = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.lastResult,
prometheus.GaugeValue,
lastResult,
task.Path,
)
ch <- prometheus.MustNewConstMetric(
c.missedRuns,
prometheus.GaugeValue,
task.MissedRunsCount,
task.Path,
)
}
return nil
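// Illustrative helper (hypothetical, not part of the change) expressing the new
// mapping: tasks that have never run produce no last_result/missed_runs series at
// all, instead of being reported as a failed run.
func exampleLastResultValue(result TaskResult) (value float64, emit bool) {
	switch result {
	case SCHED_S_TASK_HAS_NOT_RUN:
		return 0, false // never ran: skip the series entirely
	case SCHED_S_SUCCESS:
		return 1, true // last run succeeded
	default:
		return 0, true // last run failed
	}
}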
@@ -221,7 +239,9 @@ const SCHEDULED_TASK_PROGRAM_ID = "Schedule.Service.1"
// S_FALSE is returned by CoInitialize if it was already called on this thread.
const S_FALSE = 0x00000001
func getScheduledTasks() (scheduledTasks ScheduledTasks, err error) {
func getScheduledTasks() (ScheduledTasks, error) {
var scheduledTasks ScheduledTasks
// The only way to run WMI queries in parallel while being thread-safe is to
// ensure the CoInitialize[Ex]() call is bound to its current OS thread.
// Otherwise, attempting to initialize and run parallel queries across
@@ -316,7 +336,9 @@ func fetchTasksRecursively(folder *ole.IDispatch, scheduledTasks *ScheduledTasks
return err
}
func parseTask(task *ole.IDispatch) (scheduledTask ScheduledTask, err error) {
func parseTask(task *ole.IDispatch) (ScheduledTask, error) {
var scheduledTask ScheduledTask
taskNameVar, err := oleutil.GetProperty(task, "Name")
if err != nil {
return scheduledTask, err
@@ -379,7 +401,9 @@ func parseTask(task *ole.IDispatch) (scheduledTask ScheduledTask, err error) {
scheduledTask.Name = taskNameVar.ToString()
scheduledTask.Path = strings.ReplaceAll(taskPathVar.ToString(), "\\", "/")
scheduledTask.Enabled = taskEnabledVar.Value().(bool)
if val, ok := taskEnabledVar.Value().(bool); ok {
scheduledTask.Enabled = val
}
scheduledTask.State = TaskState(taskStateVar.Val)
scheduledTask.MissedRunsCount = float64(taskNumberOfMissedRunsVar.Val)
scheduledTask.LastTaskResult = TaskResult(taskLastTaskResultVar.Val)

View File

@@ -21,12 +21,7 @@ import (
"golang.org/x/sys/windows/svc/mgr"
)
const (
Name = "service"
FlagServiceWhereClause = "collector.service.services-where"
FlagServiceUseAPI = "collector.service.use-api"
FlagServiceCollectorV2 = "collector.service.v2"
)
const Name = "service"
type Config struct {
ServiceWhereClause string `yaml:"service_where_clause"`
@@ -40,8 +35,8 @@ var ConfigDefaults = Config{
V2: false,
}
// A collector is a Prometheus collector for WMI Win32_Service metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_Service metrics.
type Collector struct {
logger log.Logger
serviceWhereClause *string
@@ -55,49 +50,54 @@ type collector struct {
StateV2 *prometheus.Desc
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
c := &Collector{
serviceWhereClause: &config.ServiceWhereClause,
useAPI: &config.UseAPI,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
return &collector{
func NewWithFlags(app *kingpin.Application) *Collector {
return &Collector{
serviceWhereClause: app.Flag(
FlagServiceWhereClause,
"collector.service.services-where",
"WQL 'where' clause to use in WMI metrics query. Limits the response to the services you specify and reduces the size of the response.",
).Default(ConfigDefaults.ServiceWhereClause).String(),
useAPI: app.Flag(
FlagServiceUseAPI,
"collector.service.use-api",
"Use API calls to collect service data instead of WMI. Flag 'collector.service.services-where' won't be effective.",
).Default(strconv.FormatBool(ConfigDefaults.UseAPI)).Bool(),
v2: app.Flag(
FlagServiceCollectorV2,
"collector.service.v2",
"Enable V2 service collector. This collector can services state much more efficiently, can't provide general service information.",
).Default(strconv.FormatBool(ConfigDefaults.V2)).Bool(),
}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
if utils.IsEmpty(c.serviceWhereClause) {
_ = level.Warn(c.logger).Log("msg", "No where-clause specified for service collector. This will generate a very large number of metrics!")
}
@@ -141,7 +141,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var err error
switch {
@@ -224,9 +224,9 @@ var (
}
)
func (c *collector) collectWMI(ch chan<- prometheus.Metric) error {
func (c *Collector) collectWMI(ch chan<- prometheus.Metric) error {
var dst []Win32_Service
q := wmi.QueryAllWhere(&dst, *c.serviceWhereClause, c.logger)
q := wmi.QueryAllWhere(&dst, *c.serviceWhereClause, c.logger) //nolint:staticcheck
if err := wmi.Query(q, &dst); err != nil {
return err
}
@@ -292,7 +292,7 @@ func (c *collector) collectWMI(ch chan<- prometheus.Metric) error {
return nil
}
func (c *collector) collectAPI(ch chan<- prometheus.Metric) error {
func (c *Collector) collectAPI(ch chan<- prometheus.Metric) error {
svcmgrConnection, err := mgr.Connect()
if err != nil {
return err
@@ -384,7 +384,7 @@ func (c *collector) collectAPI(ch chan<- prometheus.Metric) error {
return nil
}
func (c *collector) collectAPIV2(ch chan<- prometheus.Metric) error {
func (c *Collector) collectAPIV2(ch chan<- prometheus.Metric) error {
services, err := c.queryAllServiceStates()
if err != nil {
_ = level.Warn(c.logger).Log("msg", "Failed to query services", "err", err)
@@ -428,7 +428,7 @@ func (c *collector) collectAPIV2(ch chan<- prometheus.Metric) error {
// Copyright 2016-present Datadog, Inc.
//
// Source: https://github.com/DataDog/datadog-agent/blob/afbd8b6c87939c92610c654cb07fdfd439e4fb27/pkg/util/winutil/scmmonitor.go#L61-L96
func (c *collector) queryAllServiceStates() ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) {
func (c *Collector) queryAllServiceStates() ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) {
// EnumServiceStatusEx requires only SC_MANAGER_ENUM_SERVICE.
h, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_ENUMERATE_SERVICE)
if err != nil {

View File

@@ -3,9 +3,6 @@
package smb
import (
"fmt"
"os"
"slices"
"strings"
"github.com/alecthomas/kingpin/v2"
@@ -16,80 +13,57 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "smb"
FlagSmbListAllCollectors = "collectors.smb.list"
FlagSmbCollectorsEnabled = "collectors.smb.enabled"
)
const Name = "smb"
type Config struct {
CollectorsEnabled string `yaml:"collectors_enabled"`
}
type Config struct{}
var ConfigDefaults = Config{
CollectorsEnabled: "",
}
var ConfigDefaults = Config{}
type collector struct {
type Collector struct {
config Config
logger log.Logger
smbListAllCollectors *bool
smbCollectorsEnabled *string
TreeConnectCount *prometheus.Desc
CurrentOpenFileCount *prometheus.Desc
enabledCollectors []string
treeConnectCount *prometheus.Desc
currentOpenFileCount *prometheus.Desc
}
// All available collector functions
var smbAllCollectorNames = []string{
"ServerShares",
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
smbListAllCollectors := false
c := &collector{
smbCollectorsEnabled: &config.CollectorsEnabled,
smbListAllCollectors: &smbListAllCollectors,
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
return &collector{
smbListAllCollectors: app.Flag(
FlagSmbListAllCollectors,
"List the collectors along with their perflib object name/ids",
).Bool(),
smbCollectorsEnabled: app.Flag(
FlagSmbCollectorsEnabled,
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
).Default(ConfigDefaults.CollectorsEnabled).String(),
}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{
"SMB Server Shares",
}, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
// desc creates a new prometheus description
desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
return prometheus.NewDesc(
@@ -100,56 +74,24 @@ func (c *collector) Build() error {
)
}
c.CurrentOpenFileCount = desc("server_shares_current_open_file_count", "Current total count open files on the SMB Server")
c.TreeConnectCount = desc("server_shares_tree_connect_count", "Count of user connections to the SMB Server")
c.currentOpenFileCount = desc("server_shares_current_open_file_count", "Current total count open files on the SMB Server")
c.treeConnectCount = desc("server_shares_tree_connect_count", "Count of user connections to the SMB Server")
c.enabledCollectors = make([]string, 0, len(smbAllCollectorNames))
return nil
}
collectorDesc := map[string]string{
"ServerShares": "SMB Server Shares",
}
// Collect collects smb metrics and sends them to prometheus.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collectServerShares(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed to collect server share metrics", "err", err)
if *c.smbListAllCollectors {
fmt.Printf("%-32s %-32s\n", "Collector Name", "Perflib Object")
for _, cname := range smbAllCollectorNames {
fmt.Printf("%-32s %-32s\n", cname, collectorDesc[cname])
}
os.Exit(0)
}
if *c.smbCollectorsEnabled == "" {
for _, collectorName := range smbAllCollectorNames {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
}
} else {
for _, collectorName := range strings.Split(*c.smbCollectorsEnabled, ",") {
if slices.Contains(smbAllCollectorNames, collectorName) {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
} else {
return fmt.Errorf("unknown smb collector: %s", collectorName)
}
}
return err
}
return nil
}
// Collect collects smb metrics and sends them to prometheus
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
collectorFuncs := map[string]func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error{
"ServerShares": c.collectServerShares,
}
for _, collectorName := range c.enabledCollectors {
if err := collectorFuncs[collectorName](ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "Error in "+collectorName, "err", err)
return err
}
}
return nil
}
// Perflib: SMB Server Shares
// Perflib: SMB Server Shares.
type perflibServerShares struct {
Name string
@@ -157,7 +99,7 @@ type perflibServerShares struct {
TreeConnectCount float64 `perflib:"Tree Connect Count"`
}
func (c *collector) collectServerShares(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectServerShares(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibServerShares
if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Server Shares"], &data, c.logger); err != nil {
return err
@@ -169,23 +111,22 @@ func (c *collector) collectServerShares(ctx *types.ScrapeContext, ch chan<- prom
}
ch <- prometheus.MustNewConstMetric(
c.CurrentOpenFileCount,
c.currentOpenFileCount,
prometheus.CounterValue,
instance.CurrentOpenFileCount,
)
ch <- prometheus.MustNewConstMetric(
c.TreeConnectCount,
c.treeConnectCount,
prometheus.CounterValue,
instance.TreeConnectCount,
)
}
return nil
}
// toLabelName converts strings to lowercase and replaces all whitespaces and dots with underscores
func (c *collector) toLabelName(name string) string {
// toLabelName converts strings to lowercase and replaces all whitespaces and dots with underscores.
func (c *Collector) toLabelName(name string) string {
s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")
s = strings.ReplaceAll(s, "__", "_")
return s

View File

@@ -3,9 +3,6 @@
package smbclient
import (
"fmt"
"os"
"slices"
"strings"
"github.com/alecthomas/kingpin/v2"
@@ -17,100 +14,78 @@ import (
)
const (
Name = "smbclient"
FlagSmbClientListAllCollectors = "collectors.smbclient.list"
FlagSmbClientCollectorsEnabled = "collectors.smbclient.enabled"
Name = "smbclient"
)
type Config struct {
CollectorsEnabled string `yaml:"collectors_enabled"`
}
type Config struct{}
var ConfigDefaults = Config{
CollectorsEnabled: "",
}
var ConfigDefaults = Config{}
type collector struct {
type Collector struct {
config Config
logger log.Logger
smbclientListAllCollectors *bool
smbclientCollectorsEnabled *string
ReadRequestQueueSecsTotal *prometheus.Desc
ReadBytesTotal *prometheus.Desc
ReadsTotal *prometheus.Desc
ReadBytesTransmittedViaSMBDirectTotal *prometheus.Desc
ReadRequestsTransmittedViaSMBDirectTotal *prometheus.Desc
TurboIOReadsTotal *prometheus.Desc
ReadSecsTotal *prometheus.Desc
WriteRequestQueueSecsTotal *prometheus.Desc
WriteBytesTotal *prometheus.Desc
WritesTotal *prometheus.Desc
WriteBytesTransmittedViaSMBDirectTotal *prometheus.Desc
WriteRequestsTransmittedViaSMBDirectTotal *prometheus.Desc
readBytesTotal *prometheus.Desc
readBytesTransmittedViaSMBDirectTotal *prometheus.Desc
readRequestQueueSecsTotal *prometheus.Desc
readRequestsTransmittedViaSMBDirectTotal *prometheus.Desc
readSecsTotal *prometheus.Desc
readsTotal *prometheus.Desc
turboIOReadsTotal *prometheus.Desc
TurboIOWritesTotal *prometheus.Desc
WriteSecsTotal *prometheus.Desc
writeBytesTotal *prometheus.Desc
writeBytesTransmittedViaSMBDirectTotal *prometheus.Desc
writeRequestQueueSecsTotal *prometheus.Desc
writeRequestsTransmittedViaSMBDirectTotal *prometheus.Desc
writeSecsTotal *prometheus.Desc
writesTotal *prometheus.Desc
RequestQueueSecsTotal *prometheus.Desc
RequestSecs *prometheus.Desc
CreditStallsTotal *prometheus.Desc
CurrentDataQueued *prometheus.Desc
DataBytesTotal *prometheus.Desc
DataRequestsTotal *prometheus.Desc
MetadataRequestsTotal *prometheus.Desc
enabledCollectors []string
creditStallsTotal *prometheus.Desc
currentDataQueued *prometheus.Desc
dataBytesTotal *prometheus.Desc
dataRequestsTotal *prometheus.Desc
metadataRequestsTotal *prometheus.Desc
requestQueueSecsTotal *prometheus.Desc
requestSecs *prometheus.Desc
}
// All available collector functions
var smbclientAllCollectorNames = []string{
"ClientShares",
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
smbclientListAllCollectors := false
c := &collector{
smbclientCollectorsEnabled: &config.CollectorsEnabled,
smbclientListAllCollectors: &smbclientListAllCollectors,
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
return &collector{
smbclientListAllCollectors: app.Flag(
FlagSmbClientListAllCollectors,
"List the collectors along with their perflib object name/ids",
).Bool(),
smbclientCollectorsEnabled: app.Flag(
FlagSmbClientCollectorsEnabled,
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
).Default(ConfigDefaults.CollectorsEnabled).String(),
}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{
"SMB Client Shares",
}, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
// desc creates a new prometheus description
desc := func(metricName string, description string, labels []string) *prometheus.Desc {
return prometheus.NewDesc(
@@ -121,59 +96,59 @@ func (c *collector) Build() error {
)
}
c.RequestQueueSecsTotal = desc("data_queue_seconds_total",
c.requestQueueSecsTotal = desc("data_queue_seconds_total",
"Seconds requests waited on queue on this share",
[]string{"server", "share"},
)
c.ReadRequestQueueSecsTotal = desc("read_queue_seconds_total",
c.readRequestQueueSecsTotal = desc("read_queue_seconds_total",
"Seconds read requests waited on queue on this share",
[]string{"server", "share"},
)
c.WriteRequestQueueSecsTotal = desc("write_queue_seconds_total",
c.writeRequestQueueSecsTotal = desc("write_queue_seconds_total",
"Seconds write requests waited on queue on this share",
[]string{"server", "share"},
)
c.RequestSecs = desc("request_seconds_total",
c.requestSecs = desc("request_seconds_total",
"Seconds waiting for requests on this share",
[]string{"server", "share"},
)
c.CreditStallsTotal = desc("stalls_total",
c.creditStallsTotal = desc("stalls_total",
"The number of requests delayed based on insufficient credits on this share",
[]string{"server", "share"},
)
c.CurrentDataQueued = desc("requests_queued",
c.currentDataQueued = desc("requests_queued",
"The point in time number of requests outstanding on this share",
[]string{"server", "share"},
)
c.DataBytesTotal = desc("data_bytes_total",
c.dataBytesTotal = desc("data_bytes_total",
"The bytes read or written on this share",
[]string{"server", "share"},
)
c.DataRequestsTotal = desc("requests_total",
c.dataRequestsTotal = desc("requests_total",
"The requests on this share",
[]string{"server", "share"},
)
c.MetadataRequestsTotal = desc("metadata_requests_total",
c.metadataRequestsTotal = desc("metadata_requests_total",
"The metadata requests on this share",
[]string{"server", "share"},
)
c.ReadBytesTransmittedViaSMBDirectTotal = desc("read_bytes_via_smbdirect_total",
c.readBytesTransmittedViaSMBDirectTotal = desc("read_bytes_via_smbdirect_total",
"The bytes read from this share via RDMA direct placement",
[]string{"server", "share"},
)
c.ReadBytesTotal = desc("read_bytes_total",
c.readBytesTotal = desc("read_bytes_total",
"The bytes read on this share",
[]string{"server", "share"},
)
c.ReadRequestsTransmittedViaSMBDirectTotal = desc("read_requests_via_smbdirect_total",
c.readRequestsTransmittedViaSMBDirectTotal = desc("read_requests_via_smbdirect_total",
"The read requests on this share via RDMA direct placement",
[]string{"server", "share"},
)
c.ReadsTotal = desc("read_requests_total",
c.readsTotal = desc("read_requests_total",
"The read requests on this share",
[]string{"server", "share"},
)
c.TurboIOReadsTotal = desc("turbo_io_reads_total",
c.turboIOReadsTotal = desc("turbo_io_reads_total",
"The read requests that go through Turbo I/O",
[]string{"server", "share"},
)
@@ -181,78 +156,45 @@ func (c *collector) Build() error {
"The write requests that go through Turbo I/O",
[]string{"server", "share"},
)
c.WriteBytesTransmittedViaSMBDirectTotal = desc("write_bytes_via_smbdirect_total",
c.writeBytesTransmittedViaSMBDirectTotal = desc("write_bytes_via_smbdirect_total",
"The written bytes to this share via RDMA direct placement",
[]string{"server", "share"},
)
c.WriteBytesTotal = desc("write_bytes_total",
c.writeBytesTotal = desc("write_bytes_total",
"The bytes written on this share",
[]string{"server", "share"},
)
c.WriteRequestsTransmittedViaSMBDirectTotal = desc("write_requests_via_smbdirect_total",
c.writeRequestsTransmittedViaSMBDirectTotal = desc("write_requests_via_smbdirect_total",
"The write requests to this share via RDMA direct placement",
[]string{"server", "share"},
)
c.WritesTotal = desc("write_requests_total",
c.writesTotal = desc("write_requests_total",
"The write requests on this share",
[]string{"server", "share"},
)
c.ReadSecsTotal = desc("read_seconds_total",
c.readSecsTotal = desc("read_seconds_total",
"Seconds waiting for read requests on this share",
[]string{"server", "share"},
)
c.WriteSecsTotal = desc("write_seconds_total",
c.writeSecsTotal = desc("write_seconds_total",
"Seconds waiting for write requests on this share",
[]string{"server", "share"},
)
c.enabledCollectors = make([]string, 0, len(smbclientAllCollectorNames))
return nil
}
collectorDesc := map[string]string{
"ClientShares": "SMB Client Shares",
}
if *c.smbclientListAllCollectors {
fmt.Printf("%-32s %-32s\n", "Collector Name", "Perflib Object")
for _, cname := range smbclientAllCollectorNames {
fmt.Printf("%-32s %-32s\n", cname, collectorDesc[cname])
}
os.Exit(0)
}
if *c.smbclientCollectorsEnabled == "" {
for _, collectorName := range smbclientAllCollectorNames {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
}
} else {
for _, collectorName := range strings.Split(*c.smbclientCollectorsEnabled, ",") {
if slices.Contains(smbclientAllCollectorNames, collectorName) {
c.enabledCollectors = append(c.enabledCollectors, collectorName)
} else {
return fmt.Errorf("unknown smbclient collector: %s", collectorName)
}
}
// Collect collects smb client metrics and sends them to prometheus.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collectClientShares(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "Error in ClientShares", "err", err)
return err
}
return nil
}
// Collect collects smb client metrics and sends them to prometheus
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
collectorFuncs := map[string]func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error{
"ClientShares": c.collectClientShares,
}
for _, collectorName := range c.enabledCollectors {
if err := collectorFuncs[collectorName](ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "Error in "+collectorName, "err", err)
return err
}
}
return nil
}
// Perflib: SMB Client Shares
// Perflib: SMB Client Shares.
type perflibClientShares struct {
Name string
@@ -279,7 +221,7 @@ type perflibClientShares struct {
WriteRequestsPerSec float64 `perflib:"Write Requests/sec"`
}
func (c *collector) collectClientShares(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectClientShares(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibClientShares
if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Client Shares"], &data, c.logger); err != nil {
return err
@@ -294,7 +236,7 @@ func (c *collector) collectClientShares(ctx *types.ScrapeContext, ch chan<- prom
shareValue := parsed[1]
// Request time spent on queue. Convert from ticks to seconds.
ch <- prometheus.MustNewConstMetric(
c.RequestQueueSecsTotal,
c.requestQueueSecsTotal,
prometheus.CounterValue,
instance.AvgDataQueueLength*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
@@ -302,28 +244,28 @@ func (c *collector) collectClientShares(ctx *types.ScrapeContext, ch chan<- prom
// Read time spent on queue. Convert from ticks to seconds.
ch <- prometheus.MustNewConstMetric(
c.ReadRequestQueueSecsTotal,
c.readRequestQueueSecsTotal,
prometheus.CounterValue,
instance.AvgReadQueueLength*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadSecsTotal,
c.readSecsTotal,
prometheus.CounterValue,
instance.AvgSecPerRead*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.WriteSecsTotal,
c.writeSecsTotal,
prometheus.CounterValue,
instance.AvgSecPerWrite*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
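
The queue-time and latency counters above are raw perflib tick values scaled into seconds through perflib.TicksToSecondScaleFactor. A small self-contained illustration follows; the concrete factor (1/1e7, i.e. 100 ns ticks) is an assumption about what that constant represents.

// Illustrative only; the scale factor value is an assumption.
package main

import "fmt"

const ticksToSecondScaleFactor = 1.0 / 1e7 // assumed: perflib counters are 100 ns ticks

func main() {
	rawTicks := 123456789.0 // example value read from a counter such as "Avg. sec/Read"
	fmt.Printf("%.4f seconds\n", rawTicks*ticksToSecondScaleFactor) // prints 12.3457 seconds
}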
ch <- prometheus.MustNewConstMetric(
c.RequestSecs,
c.requestSecs,
prometheus.CounterValue,
instance.AvgSecPerDataRequest*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
@@ -331,77 +273,77 @@ func (c *collector) collectClientShares(ctx *types.ScrapeContext, ch chan<- prom
// Write time spent on queue. Convert from ticks to seconds.
ch <- prometheus.MustNewConstMetric(
c.WriteRequestQueueSecsTotal,
c.writeRequestQueueSecsTotal,
prometheus.CounterValue,
instance.AvgWriteQueueLength*perflib.TicksToSecondScaleFactor,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.CreditStallsTotal,
c.creditStallsTotal,
prometheus.CounterValue,
instance.CreditStallsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentDataQueued,
c.currentDataQueued,
prometheus.GaugeValue,
instance.CurrentDataQueueLength,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.DataBytesTotal,
c.dataBytesTotal,
prometheus.CounterValue,
instance.DataBytesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.DataRequestsTotal,
c.dataRequestsTotal,
prometheus.CounterValue,
instance.DataRequestsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.MetadataRequestsTotal,
c.metadataRequestsTotal,
prometheus.CounterValue,
instance.MetadataRequestsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadBytesTransmittedViaSMBDirectTotal,
c.readBytesTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
instance.ReadBytesTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadBytesTotal,
c.readBytesTotal,
prometheus.CounterValue,
instance.ReadBytesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadRequestsTransmittedViaSMBDirectTotal,
c.readRequestsTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
instance.ReadRequestsTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.ReadsTotal,
c.readsTotal,
prometheus.CounterValue,
instance.ReadRequestsPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.TurboIOReadsTotal,
c.turboIOReadsTotal,
prometheus.CounterValue,
instance.TurboIOReadsPerSec,
serverValue, shareValue,
@@ -415,33 +357,32 @@ func (c *collector) collectClientShares(ctx *types.ScrapeContext, ch chan<- prom
)
ch <- prometheus.MustNewConstMetric(
c.WriteBytesTransmittedViaSMBDirectTotal,
c.writeBytesTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
instance.WriteBytesTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.WriteBytesTotal,
c.writeBytesTotal,
prometheus.CounterValue,
instance.WriteBytesPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.WriteRequestsTransmittedViaSMBDirectTotal,
c.writeRequestsTransmittedViaSMBDirectTotal,
prometheus.CounterValue,
instance.WriteRequestsTransmittedViaSMBDirectPerSec,
serverValue, shareValue,
)
ch <- prometheus.MustNewConstMetric(
c.WritesTotal,
c.writesTotal,
prometheus.CounterValue,
instance.WriteRequestsPerSec,
serverValue, shareValue,
)
}
return nil
}
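
For context, a rough sketch of how the refactored Collector type is driven. The smbclient import path, the nil-config fallback, and the way the exporter core assembles the ScrapeContext are assumptions based on the pattern the other collectors in this change follow, not part of this diff.

// Illustrative sketch, not part of the diff.
func scrapeSMBClient(logger log.Logger, scrapeCtx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
	c := smbclient.New(logger, nil) // nil config assumed to fall back to ConfigDefaults, as in the other collectors here
	if err := c.Build(); err != nil { // Build creates the metric descriptors
		return err
	}
	// scrapeCtx is assumed to already contain the "SMB Client Shares" perflib
	// object reported by c.GetPerfCounter().
	return c.Collect(scrapeCtx, ch)
}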



@@ -14,391 +14,402 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
const (
Name = "smtp"
FlagSmtpServerExclude = "collector.smtp.server-exclude"
FlagSmtpServerInclude = "collector.smtp.server-include"
)
const Name = "smtp"
type Config struct {
ServerInclude string `yaml:"server_include"`
ServerExclude string `yaml:"server_exclude"`
ServerInclude *regexp.Regexp `yaml:"server_include"`
ServerExclude *regexp.Regexp `yaml:"server_exclude"`
}
var ConfigDefaults = Config{
ServerInclude: ".+",
ServerExclude: "",
ServerInclude: types.RegExpAny,
ServerExclude: types.RegExpEmpty,
}
type collector struct {
type Collector struct {
config Config
logger log.Logger
serverInclude *string
serverExclude *string
BadmailedMessagesBadPickupFileTotal *prometheus.Desc
BadmailedMessagesGeneralFailureTotal *prometheus.Desc
BadmailedMessagesHopCountExceededTotal *prometheus.Desc
BadmailedMessagesNDROfDSNTotal *prometheus.Desc
BadmailedMessagesNoRecipientsTotal *prometheus.Desc
BadmailedMessagesTriggeredViaEventTotal *prometheus.Desc
BytesSentTotal *prometheus.Desc
BytesReceivedTotal *prometheus.Desc
CategorizerQueueLength *prometheus.Desc
ConnectionErrorsTotal *prometheus.Desc
CurrentMessagesInLocalDelivery *prometheus.Desc
DirectoryDropsTotal *prometheus.Desc
DNSQueriesTotal *prometheus.Desc
DSNFailuresTotal *prometheus.Desc
ETRNMessagesTotal *prometheus.Desc
InboundConnectionsCurrent *prometheus.Desc
InboundConnectionsTotal *prometheus.Desc
LocalQueueLength *prometheus.Desc
LocalRetryQueueLength *prometheus.Desc
MailFilesOpen *prometheus.Desc
MessageBytesReceivedTotal *prometheus.Desc
MessageBytesSentTotal *prometheus.Desc
MessageDeliveryRetriesTotal *prometheus.Desc
MessageSendRetriesTotal *prometheus.Desc
MessagesCurrentlyUndeliverable *prometheus.Desc
MessagesDeliveredTotal *prometheus.Desc
MessagesPendingRouting *prometheus.Desc
MessagesReceivedTotal *prometheus.Desc
MessagesRefusedForAddressObjectsTotal *prometheus.Desc
MessagesRefusedForMailObjectsTotal *prometheus.Desc
MessagesRefusedForSizeTotal *prometheus.Desc
MessagesSentTotal *prometheus.Desc
MessagesSubmittedTotal *prometheus.Desc
NDRsGeneratedTotal *prometheus.Desc
OutboundConnectionsCurrent *prometheus.Desc
OutboundConnectionsRefusedTotal *prometheus.Desc
OutboundConnectionsTotal *prometheus.Desc
QueueFilesOpen *prometheus.Desc
PickupDirectoryMessagesRetrievedTotal *prometheus.Desc
RemoteQueueLength *prometheus.Desc
RemoteRetryQueueLength *prometheus.Desc
RoutingTableLookupsTotal *prometheus.Desc
serverIncludePattern *regexp.Regexp
serverExcludePattern *regexp.Regexp
badMailedMessagesBadPickupFileTotal *prometheus.Desc
badMailedMessagesGeneralFailureTotal *prometheus.Desc
badMailedMessagesHopCountExceededTotal *prometheus.Desc
badMailedMessagesNDROfDSNTotal *prometheus.Desc
badMailedMessagesNoRecipientsTotal *prometheus.Desc
badMailedMessagesTriggeredViaEventTotal *prometheus.Desc
bytesReceivedTotal *prometheus.Desc
bytesSentTotal *prometheus.Desc
categorizerQueueLength *prometheus.Desc
connectionErrorsTotal *prometheus.Desc
currentMessagesInLocalDelivery *prometheus.Desc
dnsQueriesTotal *prometheus.Desc
dsnFailuresTotal *prometheus.Desc
directoryDropsTotal *prometheus.Desc
etrnMessagesTotal *prometheus.Desc
inboundConnectionsCurrent *prometheus.Desc
inboundConnectionsTotal *prometheus.Desc
localQueueLength *prometheus.Desc
localRetryQueueLength *prometheus.Desc
mailFilesOpen *prometheus.Desc
messageBytesReceivedTotal *prometheus.Desc
messageBytesSentTotal *prometheus.Desc
messageDeliveryRetriesTotal *prometheus.Desc
messageSendRetriesTotal *prometheus.Desc
messagesCurrentlyUndeliverable *prometheus.Desc
messagesDeliveredTotal *prometheus.Desc
messagesPendingRouting *prometheus.Desc
messagesReceivedTotal *prometheus.Desc
messagesRefusedForAddressObjectsTotal *prometheus.Desc
messagesRefusedForMailObjectsTotal *prometheus.Desc
messagesRefusedForSizeTotal *prometheus.Desc
messagesSentTotal *prometheus.Desc
messagesSubmittedTotal *prometheus.Desc
ndrsGeneratedTotal *prometheus.Desc
outboundConnectionsCurrent *prometheus.Desc
outboundConnectionsRefusedTotal *prometheus.Desc
outboundConnectionsTotal *prometheus.Desc
pickupDirectoryMessagesRetrievedTotal *prometheus.Desc
queueFilesOpen *prometheus.Desc
remoteQueueLength *prometheus.Desc
remoteRetryQueueLength *prometheus.Desc
routingTableLookupsTotal *prometheus.Desc
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
serverExclude: &config.ServerExclude,
serverInclude: &config.ServerInclude,
if config.ServerExclude == nil {
config.ServerExclude = ConfigDefaults.ServerExclude
}
if config.ServerInclude == nil {
config.ServerInclude = ConfigDefaults.ServerInclude
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
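
Because ServerInclude and ServerExclude are now *regexp.Regexp rather than strings, a caller constructing the collector programmatically is expected to pass compiled patterns and may leave fields nil to pick up ConfigDefaults. A hedged example; the import paths and logger setup are assumptions.

// Illustrative only; import paths are assumptions.
logger := log.NewLogfmtLogger(os.Stderr)
c := smtp.New(logger, &smtp.Config{
	ServerInclude: regexp.MustCompile("^(?:Default)$"),
	// ServerExclude left nil on purpose: New is expected to fill it from ConfigDefaults.
})
if err := c.Build(); err != nil {
	// handle error
}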
func NewWithFlags(app *kingpin.Application) types.Collector {
c := &collector{
serverInclude: app.Flag(
FlagSmtpServerInclude,
"Regexp of virtual servers to include. Server name must both match include and not match exclude to be included.",
).Default(ConfigDefaults.ServerInclude).String(),
serverExclude: app.Flag(
FlagSmtpServerExclude,
"Regexp of virtual servers to exclude. Server name must both match include and not match exclude to be included.",
).Default(ConfigDefaults.ServerExclude).String(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
var serverExclude, serverInclude string
app.Flag(
"collector.smtp.server-exclude",
"Regexp of virtual servers to exclude. Server name must both match include and not match exclude to be included.",
).Default(c.config.ServerExclude.String()).StringVar(&serverExclude)
app.Flag(
"collector.smtp.server-include",
"Regexp of virtual servers to include. Server name must both match include and not match exclude to be included.",
).Default(c.config.ServerInclude.String()).StringVar(&serverInclude)
app.Action(func(*kingpin.ParseContext) error {
var err error
c.config.ServerExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", serverExclude))
if err != nil {
return fmt.Errorf("collector.smtp.server-exclude: %w", err)
}
c.config.ServerInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", serverInclude))
if err != nil {
return fmt.Errorf("collector.smtp.server-include: %w", err)
}
return nil
})
return c
}
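
The Action callback anchors whatever pattern the user supplies, so a flag value is always matched against the whole virtual-server name rather than a substring. A small self-contained illustration; the flag value "Default|Backup" is made up.

// Equivalent of --collector.smtp.server-include="Default|Backup"
package main

import (
	"fmt"
	"regexp"
)

func main() {
	include := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", "Default|Backup"))

	fmt.Println(include.MatchString("Default"))          // true
	fmt.Println(include.MatchString("Default Web Site")) // false: the anchors reject substring matches
	fmt.Println(include.MatchString("Backup"))           // true
}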
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"SMTP Server"}, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
_ = level.Info(c.logger).Log("msg", "smtp collector is in an experimental state! Metrics for this collector have not been tested.")
c.BadmailedMessagesBadPickupFileTotal = prometheus.NewDesc(
c.badMailedMessagesBadPickupFileTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "badmailed_messages_bad_pickup_file_total"),
"Total number of malformed pickup messages sent to badmail",
[]string{"site"},
nil,
)
c.BadmailedMessagesGeneralFailureTotal = prometheus.NewDesc(
c.badMailedMessagesGeneralFailureTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "badmailed_messages_general_failure_total"),
"Total number of messages sent to badmail for reasons not associated with a specific counter",
[]string{"site"},
nil,
)
c.BadmailedMessagesHopCountExceededTotal = prometheus.NewDesc(
c.badMailedMessagesHopCountExceededTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "badmailed_messages_hop_count_exceeded_total"),
"Total number of messages sent to badmail because they had exceeded the maximum hop count",
[]string{"site"},
nil,
)
c.BadmailedMessagesNDROfDSNTotal = prometheus.NewDesc(
c.badMailedMessagesNDROfDSNTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "badmailed_messages_ndr_of_dns_total"),
"Total number of Delivery Status Notifications sent to badmail because they could not be delivered",
[]string{"site"},
nil,
)
c.BadmailedMessagesNoRecipientsTotal = prometheus.NewDesc(
c.badMailedMessagesNoRecipientsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "badmailed_messages_no_recipients_total"),
"Total number of messages sent to badmail because they had no recipients",
[]string{"site"},
nil,
)
c.BadmailedMessagesTriggeredViaEventTotal = prometheus.NewDesc(
c.badMailedMessagesTriggeredViaEventTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "badmailed_messages_triggered_via_event_total"),
"Total number of messages sent to badmail at the request of a server event sink",
[]string{"site"},
nil,
)
c.BytesSentTotal = prometheus.NewDesc(
c.bytesSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_sent_total"),
"Total number of bytes sent",
[]string{"site"},
nil,
)
c.BytesReceivedTotal = prometheus.NewDesc(
c.bytesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_received_total"),
"Total number of bytes received",
[]string{"site"},
nil,
)
c.CategorizerQueueLength = prometheus.NewDesc(
c.categorizerQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "categorizer_queue_length"),
"Number of messages in the categorizer queue",
[]string{"site"},
nil,
)
c.ConnectionErrorsTotal = prometheus.NewDesc(
c.connectionErrorsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_errors_total"),
"Total number of connection errors",
[]string{"site"},
nil,
)
c.CurrentMessagesInLocalDelivery = prometheus.NewDesc(
c.currentMessagesInLocalDelivery = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_messages_in_local_delivery"),
"Number of messages that are currently being processed by a server event sink for local delivery",
[]string{"site"},
nil,
)
c.DirectoryDropsTotal = prometheus.NewDesc(
c.directoryDropsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "directory_drops_total"),
"Total number of messages placed in a drop directory",
[]string{"site"},
nil,
)
c.DSNFailuresTotal = prometheus.NewDesc(
c.dsnFailuresTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dsn_failures_total"),
"Total number of failed DSN generation attempts",
[]string{"site"},
nil,
)
c.DNSQueriesTotal = prometheus.NewDesc(
c.dnsQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dns_queries_total"),
"Total number of DNS lookups",
[]string{"site"},
nil,
)
c.ETRNMessagesTotal = prometheus.NewDesc(
c.etrnMessagesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "etrn_messages_total"),
"Total number of ETRN messages received by the server",
[]string{"site"},
nil,
)
c.InboundConnectionsCurrent = prometheus.NewDesc(
c.inboundConnectionsCurrent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "inbound_connections_current"),
"Total number of connections currently inbound",
[]string{"site"},
nil,
)
c.InboundConnectionsTotal = prometheus.NewDesc(
c.inboundConnectionsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "inbound_connections_total"),
"Total number of inbound connections received",
[]string{"site"},
nil,
)
c.LocalQueueLength = prometheus.NewDesc(
c.localQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "local_queue_length"),
"Number of messages in the local queue",
[]string{"site"},
nil,
)
c.LocalRetryQueueLength = prometheus.NewDesc(
c.localRetryQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "local_retry_queue_length"),
"Number of messages in the local retry queue",
[]string{"site"},
nil,
)
c.MailFilesOpen = prometheus.NewDesc(
c.mailFilesOpen = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mail_files_open"),
"Number of handles to open mail files",
[]string{"site"},
nil,
)
c.MessageBytesReceivedTotal = prometheus.NewDesc(
c.messageBytesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "message_bytes_received_total"),
"Total number of bytes received in messages",
[]string{"site"},
nil,
)
c.MessageBytesSentTotal = prometheus.NewDesc(
c.messageBytesSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "message_bytes_sent_total"),
"Total number of bytes sent in messages",
[]string{"site"},
nil,
)
c.MessageDeliveryRetriesTotal = prometheus.NewDesc(
c.messageDeliveryRetriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "message_delivery_retries_total"),
"Total number of local deliveries that were retried",
[]string{"site"},
nil,
)
c.MessageSendRetriesTotal = prometheus.NewDesc(
c.messageSendRetriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "message_send_retries_total"),
"Total number of outbound message sends that were retried",
[]string{"site"},
nil,
)
c.MessagesCurrentlyUndeliverable = prometheus.NewDesc(
c.messagesCurrentlyUndeliverable = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_currently_undeliverable"),
"Number of messages that have been reported as currently undeliverable by routing",
[]string{"site"},
nil,
)
c.MessagesDeliveredTotal = prometheus.NewDesc(
c.messagesDeliveredTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_delivered_total"),
"Total number of messages delivered to local mailboxes",
[]string{"site"},
nil,
)
c.MessagesPendingRouting = prometheus.NewDesc(
c.messagesPendingRouting = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_pending_routing"),
"Number of messages that have been categorized but not routed",
[]string{"site"},
nil,
)
c.MessagesReceivedTotal = prometheus.NewDesc(
c.messagesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_received_total"),
"Total number of inbound messages accepted",
[]string{"site"},
nil,
)
c.MessagesRefusedForAddressObjectsTotal = prometheus.NewDesc(
c.messagesRefusedForAddressObjectsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_refused_for_address_objects_total"),
"Total number of messages refused due to no address objects",
[]string{"site"},
nil,
)
c.MessagesRefusedForMailObjectsTotal = prometheus.NewDesc(
c.messagesRefusedForMailObjectsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_refused_for_mail_objects_total"),
"Total number of messages refused due to no mail objects",
[]string{"site"},
nil,
)
c.MessagesRefusedForSizeTotal = prometheus.NewDesc(
c.messagesRefusedForSizeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_refused_for_size_total"),
"Total number of messages rejected because they were too big",
[]string{"site"},
nil,
)
c.MessagesSentTotal = prometheus.NewDesc(
c.messagesSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_sent_total"),
"Total number of outbound messages sent",
[]string{"site"},
nil,
)
c.MessagesSubmittedTotal = prometheus.NewDesc(
c.messagesSubmittedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "messages_submitted_total"),
"Total number of messages submitted to queuing for delivery",
[]string{"site"},
nil,
)
c.NDRsGeneratedTotal = prometheus.NewDesc(
c.ndrsGeneratedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ndrs_generated_total"),
"Total number of non-delivery reports that have been generated",
[]string{"site"},
nil,
)
c.OutboundConnectionsCurrent = prometheus.NewDesc(
c.outboundConnectionsCurrent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "outbound_connections_current"),
"Number of connections currently outbound",
[]string{"site"},
nil,
)
c.OutboundConnectionsRefusedTotal = prometheus.NewDesc(
c.outboundConnectionsRefusedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "outbound_connections_refused_total"),
"Total number of connection attempts refused by remote sites",
[]string{"site"},
nil,
)
c.OutboundConnectionsTotal = prometheus.NewDesc(
c.outboundConnectionsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "outbound_connections_total"),
"Total number of outbound connections attempted",
[]string{"site"},
nil,
)
c.PickupDirectoryMessagesRetrievedTotal = prometheus.NewDesc(
c.pickupDirectoryMessagesRetrievedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pickup_directory_messages_retrieved_total"),
"Total number of messages retrieved from the mail pick-up directory",
[]string{"site"},
nil,
)
c.QueueFilesOpen = prometheus.NewDesc(
c.queueFilesOpen = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "queue_files_open"),
"Number of handles to open queue files",
[]string{"site"},
nil,
)
c.RemoteQueueLength = prometheus.NewDesc(
c.remoteQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "remote_queue_length"),
"Number of messages in the remote queue",
[]string{"site"},
nil,
)
c.RemoteRetryQueueLength = prometheus.NewDesc(
c.remoteRetryQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "remote_retry_queue_length"),
"Number of messages in the retry queue for remote delivery",
[]string{"site"},
nil,
)
c.RoutingTableLookupsTotal = prometheus.NewDesc(
c.routingTableLookupsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "routing_table_lookups_total"),
"Total number of routing table lookups",
[]string{"site"},
nil,
)
var err error
c.serverIncludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.serverInclude))
if err != nil {
return err
}
c.serverExcludePattern, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", *c.serverExclude))
if err != nil {
return err
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting smtp metrics", "err", err)
return err
@@ -406,7 +417,7 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
return nil
}
// PerflibSMTPServer Perflib: "SMTP Server"
// PerflibSMTPServer Perflib: "SMTP Server".
type PerflibSMTPServer struct {
Name string
@@ -454,7 +465,7 @@ type PerflibSMTPServer struct {
RoutingTableLookupsTotal float64 `perflib:"Routing Table Lookups Total"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []PerflibSMTPServer
if err := perflib.UnmarshalObject(ctx.PerfObjects["SMTP Server"], &dst, c.logger); err != nil {
return err
@@ -462,298 +473,297 @@ func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
for _, server := range dst {
if server.Name == "_Total" ||
c.serverExcludePattern.MatchString(server.Name) ||
!c.serverIncludePattern.MatchString(server.Name) {
c.config.ServerExclude.MatchString(server.Name) ||
!c.config.ServerInclude.MatchString(server.Name) {
continue
}
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesBadPickupFileTotal,
c.badMailedMessagesBadPickupFileTotal,
prometheus.CounterValue,
server.BadmailedMessagesBadPickupFileTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesHopCountExceededTotal,
c.badMailedMessagesHopCountExceededTotal,
prometheus.CounterValue,
server.BadmailedMessagesHopCountExceededTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesNDROfDSNTotal,
c.badMailedMessagesNDROfDSNTotal,
prometheus.CounterValue,
server.BadmailedMessagesNDROfDSNTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesNoRecipientsTotal,
c.badMailedMessagesNoRecipientsTotal,
prometheus.CounterValue,
server.BadmailedMessagesNoRecipientsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesTriggeredViaEventTotal,
c.badMailedMessagesTriggeredViaEventTotal,
prometheus.CounterValue,
server.BadmailedMessagesTriggeredViaEventTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BytesSentTotal,
c.bytesSentTotal,
prometheus.CounterValue,
server.BytesSentTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BytesReceivedTotal,
c.bytesReceivedTotal,
prometheus.CounterValue,
server.BytesReceivedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CategorizerQueueLength,
c.categorizerQueueLength,
prometheus.GaugeValue,
server.CategorizerQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionErrorsTotal,
c.connectionErrorsTotal,
prometheus.CounterValue,
server.ConnectionErrorsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentMessagesInLocalDelivery,
c.currentMessagesInLocalDelivery,
prometheus.GaugeValue,
server.CurrentMessagesInLocalDelivery,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DirectoryDropsTotal,
c.directoryDropsTotal,
prometheus.CounterValue,
server.DirectoryDropsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DSNFailuresTotal,
c.dsnFailuresTotal,
prometheus.CounterValue,
server.DSNFailuresTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.DNSQueriesTotal,
c.dnsQueriesTotal,
prometheus.CounterValue,
server.DNSQueriesTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ETRNMessagesTotal,
c.etrnMessagesTotal,
prometheus.CounterValue,
server.ETRNMessagesTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.InboundConnectionsTotal,
c.inboundConnectionsTotal,
prometheus.CounterValue,
server.InboundConnectionsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.InboundConnectionsCurrent,
c.inboundConnectionsCurrent,
prometheus.GaugeValue,
server.InboundConnectionsCurrent,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.LocalQueueLength,
c.localQueueLength,
prometheus.GaugeValue,
server.LocalQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.LocalRetryQueueLength,
c.localRetryQueueLength,
prometheus.GaugeValue,
server.LocalRetryQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MailFilesOpen,
c.mailFilesOpen,
prometheus.GaugeValue,
server.MailFilesOpen,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessageBytesReceivedTotal,
c.messageBytesReceivedTotal,
prometheus.CounterValue,
server.MessageBytesReceivedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessageBytesSentTotal,
c.messageBytesSentTotal,
prometheus.CounterValue,
server.MessageBytesSentTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessageDeliveryRetriesTotal,
c.messageDeliveryRetriesTotal,
prometheus.CounterValue,
server.MessageDeliveryRetriesTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessageSendRetriesTotal,
c.messageSendRetriesTotal,
prometheus.CounterValue,
server.MessageSendRetriesTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesCurrentlyUndeliverable,
c.messagesCurrentlyUndeliverable,
prometheus.GaugeValue,
server.MessagesCurrentlyUndeliverable,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesDeliveredTotal,
c.messagesDeliveredTotal,
prometheus.CounterValue,
server.MessagesDeliveredTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesPendingRouting,
c.messagesPendingRouting,
prometheus.GaugeValue,
server.MessagesPendingRouting,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesReceivedTotal,
c.messagesReceivedTotal,
prometheus.CounterValue,
server.MessagesReceivedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesRefusedForAddressObjectsTotal,
c.messagesRefusedForAddressObjectsTotal,
prometheus.CounterValue,
server.MessagesRefusedForAddressObjectsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesRefusedForMailObjectsTotal,
c.messagesRefusedForMailObjectsTotal,
prometheus.CounterValue,
server.MessagesRefusedForMailObjectsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesRefusedForSizeTotal,
c.messagesRefusedForSizeTotal,
prometheus.CounterValue,
server.MessagesRefusedForSizeTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesSentTotal,
c.messagesSentTotal,
prometheus.CounterValue,
server.MessagesSentTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.MessagesSubmittedTotal,
c.messagesSubmittedTotal,
prometheus.CounterValue,
server.MessagesSubmittedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.NDRsGeneratedTotal,
c.ndrsGeneratedTotal,
prometheus.CounterValue,
server.NDRsGeneratedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.OutboundConnectionsCurrent,
c.outboundConnectionsCurrent,
prometheus.GaugeValue,
server.OutboundConnectionsCurrent,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.OutboundConnectionsRefusedTotal,
c.outboundConnectionsRefusedTotal,
prometheus.CounterValue,
server.OutboundConnectionsRefusedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.OutboundConnectionsTotal,
c.outboundConnectionsTotal,
prometheus.CounterValue,
server.OutboundConnectionsTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.QueueFilesOpen,
c.queueFilesOpen,
prometheus.GaugeValue,
server.QueueFilesOpen,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PickupDirectoryMessagesRetrievedTotal,
c.pickupDirectoryMessagesRetrievedTotal,
prometheus.CounterValue,
server.PickupDirectoryMessagesRetrievedTotal,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RemoteQueueLength,
c.remoteQueueLength,
prometheus.GaugeValue,
server.RemoteQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RemoteRetryQueueLength,
c.remoteRetryQueueLength,
prometheus.GaugeValue,
server.RemoteRetryQueueLength,
server.Name,
)
ch <- prometheus.MustNewConstMetric(
c.RoutingTableLookupsTotal,
c.routingTableLookupsTotal,
prometheus.CounterValue,
server.RoutingTableLookupsTotal,
server.Name,
)
}
return nil
}



@@ -17,72 +17,85 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI metrics.
type Collector struct {
config Config
logger log.Logger
ContextSwitchesTotal *prometheus.Desc
ExceptionDispatchesTotal *prometheus.Desc
ProcessorQueueLength *prometheus.Desc
SystemCallsTotal *prometheus.Desc
SystemUpTime *prometheus.Desc
Threads *prometheus.Desc
contextSwitchesTotal *prometheus.Desc
exceptionDispatchesTotal *prometheus.Desc
processorQueueLength *prometheus.Desc
systemCallsTotal *prometheus.Desc
systemUpTime *prometheus.Desc
threads *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"System"}, nil
}
func (c *collector) Build() error {
c.ContextSwitchesTotal = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.contextSwitchesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "context_switches_total"),
"Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)",
nil,
nil,
)
c.ExceptionDispatchesTotal = prometheus.NewDesc(
c.exceptionDispatchesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "exception_dispatches_total"),
"Total number of exceptions dispatched (WMI source: PerfOS_System.ExceptionDispatchesPersec)",
nil,
nil,
)
c.ProcessorQueueLength = prometheus.NewDesc(
c.processorQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "processor_queue_length"),
"Length of processor queue (WMI source: PerfOS_System.ProcessorQueueLength)",
nil,
nil,
)
c.SystemCallsTotal = prometheus.NewDesc(
c.systemCallsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "system_calls_total"),
"Total number of system calls (WMI source: PerfOS_System.SystemCallsPersec)",
nil,
nil,
)
c.SystemUpTime = prometheus.NewDesc(
c.systemUpTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "system_up_time"),
"System boot time (WMI source: PerfOS_System.SystemUpTime)",
nil,
nil,
)
c.Threads = prometheus.NewDesc(
c.threads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "threads"),
"Current number of threads (WMI source: PerfOS_System.Threads)",
nil,
@@ -93,7 +106,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting system metrics", "err", err)
return err
@@ -112,39 +125,39 @@ type system struct {
Threads float64 `perflib:"Threads"`
}
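
The perflib struct tags above are what UnmarshalObject matches against the counter names of the "System" performance object, so exposing another counter is mostly a matter of adding a tagged field. A hypothetical illustration; the extra counter is shown only to demonstrate the tag-to-counter mapping and is not added by this change.

// Hypothetical only, not part of the diff.
type systemWithExtra struct {
	ContextSwitchesPersec       float64 `perflib:"Context Switches/sec"`
	ProcessorQueueLength        float64 `perflib:"Processor Queue Length"`
	FileControlOperationsPersec float64 `perflib:"File Control Operations/sec"` // assumed to exist under "System"
}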
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []system
if err := perflib.UnmarshalObject(ctx.PerfObjects["System"], &dst, c.logger); err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.ContextSwitchesTotal,
c.contextSwitchesTotal,
prometheus.CounterValue,
dst[0].ContextSwitchesPersec,
)
ch <- prometheus.MustNewConstMetric(
c.ExceptionDispatchesTotal,
c.exceptionDispatchesTotal,
prometheus.CounterValue,
dst[0].ExceptionDispatchesPersec,
)
ch <- prometheus.MustNewConstMetric(
c.ProcessorQueueLength,
c.processorQueueLength,
prometheus.GaugeValue,
dst[0].ProcessorQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.SystemCallsTotal,
c.systemCallsTotal,
prometheus.CounterValue,
dst[0].SystemCallsPersec,
)
ch <- prometheus.MustNewConstMetric(
c.SystemUpTime,
c.systemUpTime,
prometheus.GaugeValue,
dst[0].SystemUpTime,
)
ch <- prometheus.MustNewConstMetric(
c.Threads,
c.threads,
prometheus.GaugeValue,
dst[0].Threads,
)



@@ -17,93 +17,106 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_TCPv{4,6} metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_Tcpip_TCPv{4,6} metrics.
type Collector struct {
config Config
logger log.Logger
ConnectionFailures *prometheus.Desc
ConnectionsActive *prometheus.Desc
ConnectionsEstablished *prometheus.Desc
ConnectionsPassive *prometheus.Desc
ConnectionsReset *prometheus.Desc
SegmentsTotal *prometheus.Desc
SegmentsReceivedTotal *prometheus.Desc
SegmentsRetransmittedTotal *prometheus.Desc
SegmentsSentTotal *prometheus.Desc
connectionFailures *prometheus.Desc
connectionsActive *prometheus.Desc
connectionsEstablished *prometheus.Desc
connectionsPassive *prometheus.Desc
connectionsReset *prometheus.Desc
segmentsTotal *prometheus.Desc
segmentsReceivedTotal *prometheus.Desc
segmentsRetransmittedTotal *prometheus.Desc
segmentsSentTotal *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"TCPv4"}, nil
}
func (c *collector) Build() error {
c.ConnectionFailures = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.connectionFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_failures_total"),
"(TCP.ConnectionFailures)",
[]string{"af"},
nil,
)
c.ConnectionsActive = prometheus.NewDesc(
c.connectionsActive = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connections_active_total"),
"(TCP.ConnectionsActive)",
[]string{"af"},
nil,
)
c.ConnectionsEstablished = prometheus.NewDesc(
c.connectionsEstablished = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connections_established"),
"(TCP.ConnectionsEstablished)",
[]string{"af"},
nil,
)
c.ConnectionsPassive = prometheus.NewDesc(
c.connectionsPassive = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connections_passive_total"),
"(TCP.ConnectionsPassive)",
[]string{"af"},
nil,
)
c.ConnectionsReset = prometheus.NewDesc(
c.connectionsReset = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connections_reset_total"),
"(TCP.ConnectionsReset)",
[]string{"af"},
nil,
)
c.SegmentsTotal = prometheus.NewDesc(
c.segmentsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "segments_total"),
"(TCP.SegmentsTotal)",
[]string{"af"},
nil,
)
c.SegmentsReceivedTotal = prometheus.NewDesc(
c.segmentsReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "segments_received_total"),
"(TCP.SegmentsReceivedTotal)",
[]string{"af"},
nil,
)
c.SegmentsRetransmittedTotal = prometheus.NewDesc(
c.segmentsRetransmittedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "segments_retransmitted_total"),
"(TCP.SegmentsRetransmittedTotal)",
[]string{"af"},
nil,
)
c.SegmentsSentTotal = prometheus.NewDesc(
c.segmentsSentTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "segments_sent_total"),
"(TCP.SegmentsSentTotal)",
[]string{"af"},
@@ -114,7 +127,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting tcp metrics", "err", err)
return err
@@ -137,64 +150,64 @@ type tcp struct {
SegmentsSentPersec float64 `perflib:"Segments Sent/sec"`
}
func writeTCPCounters(metrics tcp, labels []string, c *collector, ch chan<- prometheus.Metric) {
func writeTCPCounters(metrics tcp, labels []string, c *Collector, ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(
c.ConnectionFailures,
c.connectionFailures,
prometheus.CounterValue,
metrics.ConnectionFailures,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionsActive,
c.connectionsActive,
prometheus.CounterValue,
metrics.ConnectionsActive,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionsEstablished,
c.connectionsEstablished,
prometheus.GaugeValue,
metrics.ConnectionsEstablished,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionsPassive,
c.connectionsPassive,
prometheus.CounterValue,
metrics.ConnectionsPassive,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionsReset,
c.connectionsReset,
prometheus.CounterValue,
metrics.ConnectionsReset,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.SegmentsTotal,
c.segmentsTotal,
prometheus.CounterValue,
metrics.SegmentsPersec,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.SegmentsReceivedTotal,
c.segmentsReceivedTotal,
prometheus.CounterValue,
metrics.SegmentsReceivedPersec,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.SegmentsRetransmittedTotal,
c.segmentsRetransmittedTotal,
prometheus.CounterValue,
metrics.SegmentsRetransmittedPersec,
labels...,
)
ch <- prometheus.MustNewConstMetric(
c.SegmentsSentTotal,
c.segmentsSentTotal,
prometheus.CounterValue,
metrics.SegmentsSentPersec,
labels...,
)
}
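
writeTCPCounters is presumably invoked once per address family from collect below; the "ipv4"/"ipv6" label values are assumptions inferred from the single "af" label declared in Build, not taken verbatim from this diff.

// Sketch of the assumed call pattern inside collect (not a verbatim copy).
var dst []tcp
if err := perflib.UnmarshalObject(ctx.PerfObjects["TCPv4"], &dst, c.logger); err != nil {
	return err
}
writeTCPCounters(dst[0], []string{"ipv4"}, c, ch)

if err := perflib.UnmarshalObject(ctx.PerfObjects["TCPv6"], &dst, c.logger); err != nil {
	return err
}
writeTCPCounters(dst[0], []string{"ipv6"}, c, ch)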
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []tcp
// TCPv4 counters



@@ -19,218 +19,233 @@ type Config struct{}
var ConfigDefaults = Config{}
// collector is a Prometheus collector for WMI metrics:
// Collector is a Prometheus Collector for WMI metrics:
// win32_PerfRawData_TeradiciPerf_PCoIPSessionAudioStatistics
// win32_PerfRawData_TeradiciPerf_PCoIPSessionGeneralStatistics
// win32_PerfRawData_TeradiciPerf_PCoIPSessionImagingStatistics
// win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics
// win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics
type collector struct {
// win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics.
type Collector struct {
config Config
logger log.Logger
AudioBytesReceived *prometheus.Desc
AudioBytesSent *prometheus.Desc
AudioRXBWkbitPersec *prometheus.Desc
AudioTXBWkbitPersec *prometheus.Desc
AudioTXBWLimitkbitPersec *prometheus.Desc
audioBytesReceived *prometheus.Desc
audioBytesSent *prometheus.Desc
audioRXBWKBitPerSec *prometheus.Desc
audioTXBWKBitPerSec *prometheus.Desc
audioTXBWLimitKBitPerSec *prometheus.Desc
BytesReceived *prometheus.Desc
BytesSent *prometheus.Desc
PacketsReceived *prometheus.Desc
PacketsSent *prometheus.Desc
RXPacketsLost *prometheus.Desc
SessionDurationSeconds *prometheus.Desc
TXPacketsLost *prometheus.Desc
bytesReceived *prometheus.Desc
bytesSent *prometheus.Desc
packetsReceived *prometheus.Desc
packetsSent *prometheus.Desc
rxPacketsLost *prometheus.Desc
sessionDurationSeconds *prometheus.Desc
txPacketsLost *prometheus.Desc
ImagingActiveMinimumQuality *prometheus.Desc
ImagingApex2800Offload *prometheus.Desc
ImagingBytesReceived *prometheus.Desc
ImagingBytesSent *prometheus.Desc
ImagingDecoderCapabilitykbitPersec *prometheus.Desc
ImagingEncodedFramesPersec *prometheus.Desc
ImagingMegapixelPersec *prometheus.Desc
ImagingNegativeAcknowledgements *prometheus.Desc
ImagingRXBWkbitPersec *prometheus.Desc
ImagingSVGAdevTapframesPersec *prometheus.Desc
ImagingTXBWkbitPersec *prometheus.Desc
imagingActiveMinimumQuality *prometheus.Desc
imagingApex2800Offload *prometheus.Desc
imagingBytesReceived *prometheus.Desc
imagingBytesSent *prometheus.Desc
imagingDecoderCapabilityKBitPerSec *prometheus.Desc
imagingEncodedFramesPerSec *prometheus.Desc
imagingMegapixelPerSec *prometheus.Desc
imagingNegativeAcknowledgements *prometheus.Desc
imagingRXBWKBitPerSec *prometheus.Desc
imagingSVGAdevTapframesPerSec *prometheus.Desc
imagingTXBWKBitPerSec *prometheus.Desc
RoundTripLatencyms *prometheus.Desc
RXBWkbitPersec *prometheus.Desc
RXBWPeakkbitPersec *prometheus.Desc
RXPacketLossPercent *prometheus.Desc
RXPacketLossPercent_Base *prometheus.Desc
TXBWActiveLimitkbitPersec *prometheus.Desc
TXBWkbitPersec *prometheus.Desc
TXBWLimitkbitPersec *prometheus.Desc
TXPacketLossPercent *prometheus.Desc
TXPacketLossPercent_Base *prometheus.Desc
rxBWKBitPerSec *prometheus.Desc
rxBWPeakKBitPerSec *prometheus.Desc
rxPacketLossPercent *prometheus.Desc
rxPacketLossPercentBase *prometheus.Desc
txBWActiveLimitKBitPerSec *prometheus.Desc
txBWKBitPerSec *prometheus.Desc
txBWLimitKBitPerSec *prometheus.Desc
txPacketLossPercent *prometheus.Desc
txPacketLossPercentBase *prometheus.Desc
USBBytesReceived *prometheus.Desc
USBBytesSent *prometheus.Desc
USBRXBWkbitPersec *prometheus.Desc
USBTXBWkbitPersec *prometheus.Desc
usbBytesReceived *prometheus.Desc
usbBytesSent *prometheus.Desc
usbRXBWKBitPerSec *prometheus.Desc
usbTXBWKBitPerSec *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.AudioBytesReceived = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
_ = level.Warn(c.logger).Log("msg", "teradici_pcoip collector is deprecated and will be removed in the future.")
c.audioBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_bytes_received_total"),
"(AudioBytesReceived)",
nil,
nil,
)
c.AudioBytesSent = prometheus.NewDesc(
c.audioBytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_bytes_sent_total"),
"(AudioBytesSent)",
nil,
nil,
)
c.AudioRXBWkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_rx_bw_kbit_persec"),
"(AudioRXBWkbitPersec)",
c.audioRXBWKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_rx_bw_KBit_persec"),
"(AudioRXBWKBitPerSec)",
nil,
nil,
)
c.AudioTXBWkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_tx_bw_kbit_persec"),
"(AudioTXBWkbitPersec)",
c.audioTXBWKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_tx_bw_KBit_persec"),
"(AudioTXBWKBitPerSec)",
nil,
nil,
)
c.AudioTXBWLimitkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_tx_bw_limit_kbit_persec"),
"(AudioTXBWLimitkbitPersec)",
c.audioTXBWLimitKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_tx_bw_limit_KBit_persec"),
"(AudioTXBWLimitKBitPerSec)",
nil,
nil,
)
c.BytesReceived = prometheus.NewDesc(
c.bytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_received_total"),
"(BytesReceived)",
nil,
nil,
)
c.BytesSent = prometheus.NewDesc(
c.bytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_sent_total"),
"(BytesSent)",
nil,
nil,
)
c.PacketsReceived = prometheus.NewDesc(
c.packetsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
"(PacketsReceived)",
nil,
nil,
)
c.PacketsSent = prometheus.NewDesc(
c.packetsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_sent_total"),
"(PacketsSent)",
nil,
nil,
)
c.RXPacketsLost = prometheus.NewDesc(
c.rxPacketsLost = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rx_packets_lost_total"),
"(RXPacketsLost)",
nil,
nil,
)
c.SessionDurationSeconds = prometheus.NewDesc(
c.sessionDurationSeconds = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "session_duration_seconds_total"),
"(SessionDurationSeconds)",
nil,
nil,
)
c.TXPacketsLost = prometheus.NewDesc(
c.txPacketsLost = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "tx_packets_lost_total"),
"(TXPacketsLost)",
nil,
nil,
)
c.ImagingActiveMinimumQuality = prometheus.NewDesc(
c.imagingActiveMinimumQuality = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_active_min_quality"),
"(ImagingActiveMinimumQuality)",
nil,
nil,
)
c.ImagingApex2800Offload = prometheus.NewDesc(
c.imagingApex2800Offload = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_apex2800_offload"),
"(ImagingApex2800Offload)",
nil,
nil,
)
c.ImagingBytesReceived = prometheus.NewDesc(
c.imagingBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_bytes_received_total"),
"(ImagingBytesReceived)",
nil,
nil,
)
c.ImagingBytesSent = prometheus.NewDesc(
c.imagingBytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_bytes_sent_total"),
"(ImagingBytesSent)",
nil,
nil,
)
c.ImagingDecoderCapabilitykbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_decoder_capability_kbit_persec"),
"(ImagingDecoderCapabilitykbitPersec)",
c.imagingDecoderCapabilityKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_decoder_capability_KBit_persec"),
"(ImagingDecoderCapabilityKBitPerSec)",
nil,
nil,
)
c.ImagingEncodedFramesPersec = prometheus.NewDesc(
c.imagingEncodedFramesPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_encoded_frames_persec"),
"(ImagingEncodedFramesPersec)",
"(ImagingEncodedFramesPerSec)",
nil,
nil,
)
c.ImagingMegapixelPersec = prometheus.NewDesc(
c.imagingMegapixelPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_megapixel_persec"),
"(ImagingMegapixelPersec)",
"(ImagingMegapixelPerSec)",
nil,
nil,
)
c.ImagingNegativeAcknowledgements = prometheus.NewDesc(
c.imagingNegativeAcknowledgements = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_negative_acks_total"),
"(ImagingNegativeAcknowledgements)",
nil,
nil,
)
c.ImagingRXBWkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_rx_bw_kbit_persec"),
"(ImagingRXBWkbitPersec)",
c.imagingRXBWKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_rx_bw_KBit_persec"),
"(ImagingRXBWKBitPerSec)",
nil,
nil,
)
c.ImagingSVGAdevTapframesPersec = prometheus.NewDesc(
c.imagingSVGAdevTapframesPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_svga_devtap_frames_persec"),
"(ImagingSVGAdevTapframesPersec)",
"(ImagingSVGAdevTapframesPerSec)",
nil,
nil,
)
c.ImagingTXBWkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_tx_bw_kbit_persec"),
"(ImagingTXBWkbitPersec)",
c.imagingTXBWKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "imaging_tx_bw_KBit_persec"),
"(ImagingTXBWKBitPerSec)",
nil,
nil,
)
@@ -241,82 +256,82 @@ func (c *collector) Build() error {
nil,
nil,
)
c.RXBWkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rx_bw_kbit_persec"),
"(RXBWkbitPersec)",
c.rxBWKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rx_bw_KBit_persec"),
"(RXBWKBitPerSec)",
nil,
nil,
)
c.RXBWPeakkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rx_bw_peak_kbit_persec"),
"(RXBWPeakkbitPersec)",
c.rxBWPeakKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rx_bw_peak_KBit_persec"),
"(RXBWPeakKBitPerSec)",
nil,
nil,
)
c.RXPacketLossPercent = prometheus.NewDesc(
c.rxPacketLossPercent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rx_packet_loss_percent"),
"(RXPacketLossPercent)",
nil,
nil,
)
c.RXPacketLossPercent_Base = prometheus.NewDesc(
c.rxPacketLossPercentBase = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rx_packet_loss_percent_base"),
"(RXPacketLossPercent_Base)",
nil,
nil,
)
c.TXBWActiveLimitkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "tx_bw_active_limit_kbit_persec"),
"(TXBWActiveLimitkbitPersec)",
c.txBWActiveLimitKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "tx_bw_active_limit_KBit_persec"),
"(TXBWActiveLimitKBitPerSec)",
nil,
nil,
)
c.TXBWkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "tx_bw_kbit_persec"),
"(TXBWkbitPersec)",
c.txBWKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "tx_bw_KBit_persec"),
"(TXBWKBitPerSec)",
nil,
nil,
)
c.TXBWLimitkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "tx_bw_limit_kbit_persec"),
"(TXBWLimitkbitPersec)",
c.txBWLimitKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "tx_bw_limit_KBit_persec"),
"(TXBWLimitKBitPerSec)",
nil,
nil,
)
c.TXPacketLossPercent = prometheus.NewDesc(
c.txPacketLossPercent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "tx_packet_loss_percent"),
"(TXPacketLossPercent)",
nil,
nil,
)
c.TXPacketLossPercent_Base = prometheus.NewDesc(
c.txPacketLossPercentBase = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "tx_packet_loss_percent_base"),
"(TXPacketLossPercent_Base)",
nil,
nil,
)
c.USBBytesReceived = prometheus.NewDesc(
c.usbBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "usb_bytes_received_total"),
"(USBBytesReceived)",
nil,
nil,
)
c.USBBytesSent = prometheus.NewDesc(
c.usbBytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "usb_bytes_sent_total"),
"(USBBytesSent)",
nil,
nil,
)
c.USBRXBWkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "usb_rx_bw_kbit_persec"),
"(USBRXBWkbitPersec)",
c.usbRXBWKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "usb_rx_bw_KBit_persec"),
"(USBRXBWKBitPerSec)",
nil,
nil,
)
c.USBTXBWkbitPersec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "usb_tx_bw_kbit_persec"),
"(USBTXBWkbitPersec)",
c.usbTXBWKBitPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "usb_tx_bw_KBit_persec"),
"(USBTXBWKBitPerSec)",
nil,
nil,
)
@@ -325,7 +340,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collectAudio(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session audio metrics", "err", err)
return err
@@ -352,9 +367,9 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
type win32_PerfRawData_TeradiciPerf_PCoIPSessionAudioStatistics struct {
AudioBytesReceived uint64
AudioBytesSent uint64
AudioRXBWkbitPersec uint64
AudioTXBWkbitPersec uint64
AudioTXBWLimitkbitPersec uint64
AudioRXBWKBitPerSec uint64
AudioTXBWKBitPerSec uint64
AudioTXBWLimitKBitPerSec uint64
}
type win32_PerfRawData_TeradiciPerf_PCoIPSessionGeneralStatistics struct {
@@ -372,36 +387,36 @@ type win32_PerfRawData_TeradiciPerf_PCoIPSessionImagingStatistics struct {
ImagingApex2800Offload uint32
ImagingBytesReceived uint64
ImagingBytesSent uint64
ImagingDecoderCapabilitykbitPersec uint32
ImagingEncodedFramesPersec uint32
ImagingMegapixelPersec uint32
ImagingDecoderCapabilityKBitPerSec uint32
ImagingEncodedFramesPerSec uint32
ImagingMegapixelPerSec uint32
ImagingNegativeAcknowledgements uint32
ImagingRXBWkbitPersec uint64
ImagingSVGAdevTapframesPersec uint32
ImagingTXBWkbitPersec uint64
ImagingRXBWKBitPerSec uint64
ImagingSVGAdevTapframesPerSec uint32
ImagingTXBWKBitPerSec uint64
}
type win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics struct {
RoundTripLatencyms uint32
RXBWkbitPersec uint64
RXBWPeakkbitPersec uint32
RXBWKBitPerSec uint64
RXBWPeakKBitPerSec uint32
RXPacketLossPercent uint32
RXPacketLossPercent_Base uint32
TXBWActiveLimitkbitPersec uint32
TXBWkbitPersec uint64
TXBWLimitkbitPersec uint32
RXPacketLossPercentBase uint32
TXBWActiveLimitKBitPerSec uint32
TXBWKBitPerSec uint64
TXBWLimitKBitPerSec uint32
TXPacketLossPercent uint32
TXPacketLossPercent_Base uint32
TXPacketLossPercentBase uint32
}
type win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics struct {
USBBytesReceived uint64
USBBytesSent uint64
USBRXBWkbitPersec uint64
USBTXBWkbitPersec uint64
USBRXBWKBitPerSec uint64
USBTXBWKBitPerSec uint64
}
func (c *collector) collectAudio(ch chan<- prometheus.Metric) error {
func (c *Collector) collectAudio(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionAudioStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -412,39 +427,39 @@ func (c *collector) collectAudio(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.AudioBytesReceived,
c.audioBytesReceived,
prometheus.CounterValue,
float64(dst[0].AudioBytesReceived),
)
ch <- prometheus.MustNewConstMetric(
c.AudioBytesSent,
c.audioBytesSent,
prometheus.CounterValue,
float64(dst[0].AudioBytesSent),
)
ch <- prometheus.MustNewConstMetric(
c.AudioRXBWkbitPersec,
c.audioRXBWKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].AudioRXBWkbitPersec),
float64(dst[0].AudioRXBWKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.AudioTXBWkbitPersec,
c.audioTXBWKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].AudioTXBWkbitPersec),
float64(dst[0].AudioTXBWKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.AudioTXBWLimitkbitPersec,
c.audioTXBWLimitKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].AudioTXBWLimitkbitPersec),
float64(dst[0].AudioTXBWLimitKBitPerSec),
)
return nil
}
func (c *collector) collectGeneral(ch chan<- prometheus.Metric) error {
func (c *Collector) collectGeneral(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionGeneralStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -455,43 +470,43 @@ func (c *collector) collectGeneral(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.BytesReceived,
c.bytesReceived,
prometheus.CounterValue,
float64(dst[0].BytesReceived),
)
ch <- prometheus.MustNewConstMetric(
c.BytesSent,
c.bytesSent,
prometheus.CounterValue,
float64(dst[0].BytesSent),
)
ch <- prometheus.MustNewConstMetric(
c.PacketsReceived,
c.packetsReceived,
prometheus.CounterValue,
float64(dst[0].PacketsReceived),
)
ch <- prometheus.MustNewConstMetric(
c.PacketsSent,
c.packetsSent,
prometheus.CounterValue,
float64(dst[0].PacketsSent),
)
ch <- prometheus.MustNewConstMetric(
c.RXPacketsLost,
c.rxPacketsLost,
prometheus.CounterValue,
float64(dst[0].RXPacketsLost),
)
ch <- prometheus.MustNewConstMetric(
c.SessionDurationSeconds,
c.sessionDurationSeconds,
prometheus.CounterValue,
float64(dst[0].SessionDurationSeconds),
)
ch <- prometheus.MustNewConstMetric(
c.TXPacketsLost,
c.txPacketsLost,
prometheus.CounterValue,
float64(dst[0].TXPacketsLost),
)
@@ -499,7 +514,7 @@ func (c *collector) collectGeneral(ch chan<- prometheus.Metric) error {
return nil
}
func (c *collector) collectImaging(ch chan<- prometheus.Metric) error {
func (c *Collector) collectImaging(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionImagingStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -510,75 +525,75 @@ func (c *collector) collectImaging(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.ImagingActiveMinimumQuality,
c.imagingActiveMinimumQuality,
prometheus.GaugeValue,
float64(dst[0].ImagingActiveMinimumQuality),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingApex2800Offload,
c.imagingApex2800Offload,
prometheus.GaugeValue,
float64(dst[0].ImagingApex2800Offload),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingBytesReceived,
c.imagingBytesReceived,
prometheus.CounterValue,
float64(dst[0].ImagingBytesReceived),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingBytesSent,
c.imagingBytesSent,
prometheus.CounterValue,
float64(dst[0].ImagingBytesSent),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingDecoderCapabilitykbitPersec,
c.imagingDecoderCapabilityKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].ImagingDecoderCapabilitykbitPersec),
float64(dst[0].ImagingDecoderCapabilityKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingEncodedFramesPersec,
c.imagingEncodedFramesPerSec,
prometheus.GaugeValue,
float64(dst[0].ImagingEncodedFramesPersec),
float64(dst[0].ImagingEncodedFramesPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingMegapixelPersec,
c.imagingMegapixelPerSec,
prometheus.GaugeValue,
float64(dst[0].ImagingMegapixelPersec),
float64(dst[0].ImagingMegapixelPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingNegativeAcknowledgements,
c.imagingNegativeAcknowledgements,
prometheus.CounterValue,
float64(dst[0].ImagingNegativeAcknowledgements),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingRXBWkbitPersec,
c.imagingRXBWKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].ImagingRXBWkbitPersec),
float64(dst[0].ImagingRXBWKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingSVGAdevTapframesPersec,
c.imagingSVGAdevTapframesPerSec,
prometheus.GaugeValue,
float64(dst[0].ImagingSVGAdevTapframesPersec),
float64(dst[0].ImagingSVGAdevTapframesPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.ImagingTXBWkbitPersec,
c.imagingTXBWKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].ImagingTXBWkbitPersec),
float64(dst[0].ImagingTXBWKBitPerSec),
)
return nil
}
func (c *collector) collectNetwork(ch chan<- prometheus.Metric) error {
func (c *Collector) collectNetwork(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -595,63 +610,63 @@ func (c *collector) collectNetwork(ch chan<- prometheus.Metric) error {
)
ch <- prometheus.MustNewConstMetric(
c.RXBWkbitPersec,
c.rxBWKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].RXBWkbitPersec),
float64(dst[0].RXBWKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.RXBWPeakkbitPersec,
c.rxBWPeakKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].RXBWPeakkbitPersec),
float64(dst[0].RXBWPeakKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.RXPacketLossPercent,
c.rxPacketLossPercent,
prometheus.GaugeValue,
float64(dst[0].RXPacketLossPercent),
)
ch <- prometheus.MustNewConstMetric(
c.RXPacketLossPercent_Base,
c.rxPacketLossPercentBase,
prometheus.GaugeValue,
float64(dst[0].RXPacketLossPercent_Base),
float64(dst[0].RXPacketLossPercentBase),
)
ch <- prometheus.MustNewConstMetric(
c.TXBWActiveLimitkbitPersec,
c.txBWActiveLimitKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].TXBWActiveLimitkbitPersec),
float64(dst[0].TXBWActiveLimitKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.TXBWkbitPersec,
c.txBWKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].TXBWkbitPersec),
float64(dst[0].TXBWKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.TXBWLimitkbitPersec,
c.txBWLimitKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].TXBWLimitkbitPersec),
float64(dst[0].TXBWLimitKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.TXPacketLossPercent,
c.txPacketLossPercent,
prometheus.GaugeValue,
float64(dst[0].TXPacketLossPercent),
)
ch <- prometheus.MustNewConstMetric(
c.TXPacketLossPercent_Base,
c.txPacketLossPercentBase,
prometheus.GaugeValue,
float64(dst[0].TXPacketLossPercent_Base),
float64(dst[0].TXPacketLossPercentBase),
)
return nil
}
func (c *collector) collectUsb(ch chan<- prometheus.Metric) error {
func (c *Collector) collectUsb(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -662,27 +677,27 @@ func (c *collector) collectUsb(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.USBBytesReceived,
c.usbBytesReceived,
prometheus.CounterValue,
float64(dst[0].USBBytesReceived),
)
ch <- prometheus.MustNewConstMetric(
c.USBBytesSent,
c.usbBytesSent,
prometheus.CounterValue,
float64(dst[0].USBBytesSent),
)
ch <- prometheus.MustNewConstMetric(
c.USBRXBWkbitPersec,
c.usbRXBWKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].USBRXBWkbitPersec),
float64(dst[0].USBRXBWKBitPerSec),
)
ch <- prometheus.MustNewConstMetric(
c.USBTXBWkbitPersec,
c.usbTXBWKBitPerSec,
prometheus.GaugeValue,
float64(dst[0].USBTXBWkbitPersec),
float64(dst[0].USBTXBWKBitPerSec),
)
return nil


@@ -46,148 +46,165 @@ func isConnectionBrokerServer(logger log.Logger) bool {
return false
}
// A collector is a Prometheus collector for WMI
// A Collector is a Prometheus Collector for WMI
// Win32_PerfRawData_LocalSessionManager_TerminalServices & Win32_PerfRawData_TermService_TerminalServicesSession metrics
// https://docs.microsoft.com/en-us/previous-versions/aa394344(v%3Dvs.85)
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_localsessionmanager_terminalservices/
type collector struct {
type Collector struct {
config Config
logger log.Logger
connectionBrokerEnabled bool
hServer syscall.Handle
SessionInfo *prometheus.Desc
LocalSessionCount *prometheus.Desc
ConnectionBrokerPerformance *prometheus.Desc
HandleCount *prometheus.Desc
PageFaultsPersec *prometheus.Desc
PageFileBytes *prometheus.Desc
PageFileBytesPeak *prometheus.Desc
PercentCPUTime *prometheus.Desc
PoolNonpagedBytes *prometheus.Desc
PoolPagedBytes *prometheus.Desc
PrivateBytes *prometheus.Desc
ThreadCount *prometheus.Desc
VirtualBytes *prometheus.Desc
VirtualBytesPeak *prometheus.Desc
WorkingSet *prometheus.Desc
WorkingSetPeak *prometheus.Desc
sessionInfo *prometheus.Desc
connectionBrokerPerformance *prometheus.Desc
handleCount *prometheus.Desc
pageFaultsPerSec *prometheus.Desc
pageFileBytes *prometheus.Desc
pageFileBytesPeak *prometheus.Desc
percentCPUTime *prometheus.Desc
poolNonPagedBytes *prometheus.Desc
poolPagedBytes *prometheus.Desc
privateBytes *prometheus.Desc
threadCount *prometheus.Desc
virtualBytes *prometheus.Desc
virtualBytesPeak *prometheus.Desc
workingSet *prometheus.Desc
workingSetPeak *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{
"Terminal Services Session",
"Remote Desktop Connection Broker Counterset",
}, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
err := wtsapi32.WTSCloseServer(c.hServer)
if err != nil {
return fmt.Errorf("failed to close WTS server: %w", err)
}
return nil
}
func (c *Collector) Build() error {
c.connectionBrokerEnabled = isConnectionBrokerServer(c.logger)
c.SessionInfo = prometheus.NewDesc(
c.sessionInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "session_info"),
"Terminal Services sessions info",
[]string{"session_name", "user", "host", "state"},
nil,
)
c.ConnectionBrokerPerformance = prometheus.NewDesc(
c.connectionBrokerPerformance = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_broker_performance_total"),
"The total number of connections handled by the Connection Brokers since the service started.",
[]string{"connection"},
nil,
)
c.HandleCount = prometheus.NewDesc(
c.handleCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "handles"),
"Total number of handles currently opened by this process. This number is the sum of the handles currently opened by each thread in this process.",
[]string{"session_name"},
nil,
)
c.PageFaultsPersec = prometheus.NewDesc(
c.pageFaultsPerSec = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_fault_total"),
"Rate at which page faults occur in the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. The page may not be retrieved from disk if it is on the standby list and therefore already in main memory. The page also may not be retrieved if it is in use by another process which shares the page.",
[]string{"session_name"},
nil,
)
c.PageFileBytes = prometheus.NewDesc(
c.pageFileBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_file_bytes"),
"Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.",
[]string{"session_name"},
nil,
)
c.PageFileBytesPeak = prometheus.NewDesc(
c.pageFileBytesPeak = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_file_bytes_peak"),
"Maximum number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.",
[]string{"session_name"},
nil,
)
c.PercentCPUTime = prometheus.NewDesc(
c.percentCPUTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_time_seconds_total"),
"Total elapsed time that this process's threads have spent executing code.",
[]string{"mode", "session_name"},
nil,
)
c.PoolNonpagedBytes = prometheus.NewDesc(
c.poolNonPagedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_non_paged_bytes"),
"Number of bytes in the non-paged pool, an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. This property displays the last observed value only; it is not an average.",
[]string{"session_name"},
nil,
)
c.PoolPagedBytes = prometheus.NewDesc(
c.poolPagedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_paged_bytes"),
"Number of bytes in the paged pool, an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. This property displays the last observed value only; it is not an average.",
[]string{"session_name"},
nil,
)
c.PrivateBytes = prometheus.NewDesc(
c.privateBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "private_bytes"),
"Current number of bytes this process has allocated that cannot be shared with other processes.",
[]string{"session_name"},
nil,
)
c.ThreadCount = prometheus.NewDesc(
c.threadCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "threads"),
"Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.",
[]string{"session_name"},
nil,
)
c.VirtualBytes = prometheus.NewDesc(
c.virtualBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "virtual_bytes"),
"Current size, in bytes, of the virtual address space the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries.",
[]string{"session_name"},
nil,
)
c.VirtualBytesPeak = prometheus.NewDesc(
c.virtualBytesPeak = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "virtual_bytes_peak"),
"Maximum number of bytes of virtual address space the process has used at any one time. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process might limit its ability to load libraries.",
[]string{"session_name"},
nil,
)
c.WorkingSet = prometheus.NewDesc(
c.workingSet = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_bytes"),
"Current number of bytes in the working set of this process. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.",
[]string{"session_name"},
nil,
)
c.WorkingSetPeak = prometheus.NewDesc(
c.workingSetPeak = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_bytes_peak"),
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.",
[]string{"session_name"},
@@ -206,7 +223,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collectWTSSessions(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session infos", "err", err)
return err
@@ -245,7 +262,7 @@ type perflibTerminalServicesSession struct {
WorkingSetPeak float64 `perflib:"Working Set Peak"`
}
func (c *collector) collectTSSessionCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectTSSessionCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibTerminalServicesSession, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Terminal Services Session"], &dst, c.logger)
if err != nil {
@@ -266,94 +283,94 @@ func (c *collector) collectTSSessionCounters(ctx *types.ScrapeContext, ch chan<-
names[n] = true
ch <- prometheus.MustNewConstMetric(
c.HandleCount,
c.handleCount,
prometheus.GaugeValue,
d.HandleCount,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PageFaultsPersec,
c.pageFaultsPerSec,
prometheus.CounterValue,
d.PageFaultsPersec,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PageFileBytes,
c.pageFileBytes,
prometheus.GaugeValue,
d.PageFileBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PageFileBytesPeak,
c.pageFileBytesPeak,
prometheus.GaugeValue,
d.PageFileBytesPeak,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PercentCPUTime,
c.percentCPUTime,
prometheus.CounterValue,
d.PercentPrivilegedTime,
d.Name,
"privileged",
)
ch <- prometheus.MustNewConstMetric(
c.PercentCPUTime,
c.percentCPUTime,
prometheus.CounterValue,
d.PercentProcessorTime,
d.Name,
"processor",
)
ch <- prometheus.MustNewConstMetric(
c.PercentCPUTime,
c.percentCPUTime,
prometheus.CounterValue,
d.PercentUserTime,
d.Name,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.PoolNonpagedBytes,
c.poolNonPagedBytes,
prometheus.GaugeValue,
d.PoolNonpagedBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PoolPagedBytes,
c.poolPagedBytes,
prometheus.GaugeValue,
d.PoolPagedBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PrivateBytes,
c.privateBytes,
prometheus.GaugeValue,
d.PrivateBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ThreadCount,
c.threadCount,
prometheus.GaugeValue,
d.ThreadCount,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VirtualBytes,
c.virtualBytes,
prometheus.GaugeValue,
d.VirtualBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VirtualBytesPeak,
c.virtualBytesPeak,
prometheus.GaugeValue,
d.VirtualBytesPeak,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSet,
c.workingSet,
prometheus.GaugeValue,
d.WorkingSet,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSetPeak,
c.workingSetPeak,
prometheus.GaugeValue,
d.WorkingSetPeak,
d.Name,
@@ -368,7 +385,7 @@ type perflibRemoteDesktopConnectionBrokerCounterset struct {
FailedConnections float64 `perflib:"Failed Connections"`
}
func (c *collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
dst := make([]perflibRemoteDesktopConnectionBrokerCounterset, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Remote Desktop Connection Broker Counterset"], &dst, c.logger)
if err != nil {
@@ -379,21 +396,21 @@ func (c *collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeC
}
ch <- prometheus.MustNewConstMetric(
c.ConnectionBrokerPerformance,
c.connectionBrokerPerformance,
prometheus.CounterValue,
dst[0].SuccessfulConnections,
"Successful",
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionBrokerPerformance,
c.connectionBrokerPerformance,
prometheus.CounterValue,
dst[0].PendingConnections,
"Pending",
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionBrokerPerformance,
c.connectionBrokerPerformance,
prometheus.CounterValue,
dst[0].FailedConnections,
"Failed",
@@ -402,7 +419,7 @@ func (c *collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeC
return nil
}
func (c *collector) collectWTSSessions(ch chan<- prometheus.Metric) error {
func (c *Collector) collectWTSSessions(ch chan<- prometheus.Metric) error {
sessions, err := wtsapi32.WTSEnumerateSessionsEx(c.hServer, c.logger)
if err != nil {
return fmt.Errorf("failed to enumerate WTS sessions: %w", err)
@@ -421,10 +438,10 @@ func (c *collector) collectWTSSessions(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.SessionInfo,
c.sessionInfo,
prometheus.GaugeValue,
isState,
strings.Replace(session.SessionName, "#", " ", -1),
strings.ReplaceAll(session.SessionName, "#", " "),
userName,
session.HostName,
stateName,


@@ -8,7 +8,5 @@ import (
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, terminal_services.Name, terminal_services.NewWithFlags)
}


@@ -31,79 +31,91 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
)
const (
Name = "textfile"
FlagTextFileDirectories = "collector.textfile.directories"
)
const Name = "textfile"
type Config struct {
TextFileDirectories string `yaml:"text_file_directories"`
TextFileDirectories []string `yaml:"text_file_directories"`
}
var ConfigDefaults = Config{
TextFileDirectories: getDefaultPath(),
TextFileDirectories: []string{getDefaultPath()},
}
type collector struct {
type Collector struct {
config Config
logger log.Logger
textFileDirectories *string
directories string
// Only set for testing to get predictable output.
mtime *float64
mTime *float64
MtimeDesc *prometheus.Desc
mTimeDesc *prometheus.Desc
}
func New(logger log.Logger, config *Config) types.Collector {
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &collector{
textFileDirectories: &config.TextFileDirectories,
if config.TextFileDirectories == nil {
config.TextFileDirectories = ConfigDefaults.TextFileDirectories
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(app *kingpin.Application) types.Collector {
return &collector{
textFileDirectories: app.Flag(
FlagTextFileDirectories,
"Directory or Directories to read text files with metrics from.",
).Default(ConfigDefaults.TextFileDirectories).String(),
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
var textFileDirectories string
app.Flag(
"collector.textfile.directories",
"Directory or Directories to read text files with metrics from.",
).Default(strings.Join(ConfigDefaults.TextFileDirectories, ",")).StringVar(&textFileDirectories)
app.Action(func(*kingpin.ParseContext) error {
c.config.TextFileDirectories = strings.Split(textFileDirectories, ",")
return nil
})
return c
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.directories = ""
if utils.HasValue(c.textFileDirectories) {
c.directories = strings.Trim(*c.textFileDirectories, ",")
}
func (c *Collector) Close() error {
return nil
}
_ = level.Info(c.logger).Log("msg", "textfile collector directories: "+c.directories)
func (c *Collector) Build() error {
_ = level.Info(c.logger).
Log("msg", "textfile Collector directories: "+strings.Join(c.config.TextFileDirectories, ","))
c.MtimeDesc = prometheus.NewDesc(
c.mTimeDesc = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, "textfile", "mtime_seconds"),
"Unixtime mtime of textfiles successfully read.",
[]string{"file"},
@@ -118,32 +130,32 @@ func (c *collector) Build() error {
func duplicateMetricEntry(metricFamilies []*dto.MetricFamily) bool {
uniqueMetrics := make(map[string]map[string]string)
for _, metricFamily := range metricFamilies {
metric_name := *metricFamily.Name
for _, metric := range metricFamily.Metric {
metric_labels := metric.GetLabel()
metricName := metricFamily.GetName()
for _, metric := range metricFamily.GetMetric() {
metricLabels := metric.GetLabel()
labels := make(map[string]string)
for _, label := range metric_labels {
for _, label := range metricLabels {
labels[label.GetName()] = label.GetValue()
}
// Check if key is present before appending
_, mapContainsKey := uniqueMetrics[metric_name]
_, mapContainsKey := uniqueMetrics[metricName]
// Duplicate metric found with identical labels & label values
if mapContainsKey == true && reflect.DeepEqual(uniqueMetrics[metric_name], labels) {
if mapContainsKey && reflect.DeepEqual(uniqueMetrics[metricName], labels) {
return true
}
uniqueMetrics[metric_name] = labels
uniqueMetrics[metricName] = labels
}
}
return false
}
func (c *collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) {
func (c *Collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) {
var valType prometheus.ValueType
var val float64
allLabelNames := map[string]struct{}{}
for _, metric := range metricFamily.Metric {
for _, metric := range metricFamily.GetMetric() {
labels := metric.GetLabel()
for _, label := range labels {
if _, ok := allLabelNames[label.GetName()]; !ok {
@@ -152,9 +164,9 @@ func (c *collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<
}
}
for _, metric := range metricFamily.Metric {
for _, metric := range metricFamily.GetMetric() {
if metric.TimestampMs != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Ignoring unsupported custom timestamp on textfile collector metric %v", metric))
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Ignoring unsupported custom timestamp on textfile Collector metric %v", metric))
}
labels := metric.GetLabel()
@@ -173,7 +185,7 @@ func (c *collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<
break
}
}
if present == false {
if !present {
names = append(names, k)
values = append(values, "")
}
@@ -183,44 +195,44 @@ func (c *collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<
switch metricType {
case dto.MetricType_COUNTER:
valType = prometheus.CounterValue
val = metric.Counter.GetValue()
val = metric.GetCounter().GetValue()
case dto.MetricType_GAUGE:
valType = prometheus.GaugeValue
val = metric.Gauge.GetValue()
val = metric.GetGauge().GetValue()
case dto.MetricType_UNTYPED:
valType = prometheus.UntypedValue
val = metric.Untyped.GetValue()
val = metric.GetUntyped().GetValue()
case dto.MetricType_SUMMARY:
quantiles := map[float64]float64{}
for _, q := range metric.Summary.Quantile {
for _, q := range metric.GetSummary().GetQuantile() {
quantiles[q.GetQuantile()] = q.GetValue()
}
ch <- prometheus.MustNewConstSummary(
prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetName(),
metricFamily.GetHelp(),
names, nil,
),
metric.Summary.GetSampleCount(),
metric.Summary.GetSampleSum(),
metric.GetSummary().GetSampleCount(),
metric.GetSummary().GetSampleSum(),
quantiles, values...,
)
case dto.MetricType_HISTOGRAM:
buckets := map[float64]uint64{}
for _, b := range metric.Histogram.Bucket {
for _, b := range metric.GetHistogram().GetBucket() {
buckets[b.GetUpperBound()] = b.GetCumulativeCount()
}
ch <- prometheus.MustNewConstHistogram(
prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetName(),
metricFamily.GetHelp(),
names, nil,
),
metric.Histogram.GetSampleCount(),
metric.Histogram.GetSampleSum(),
metric.GetHistogram().GetSampleCount(),
metric.GetHistogram().GetSampleSum(),
buckets, values...,
)
default:
@@ -230,7 +242,7 @@ func (c *collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<
if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED {
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetName(),
metricFamily.GetHelp(),
names, nil,
),
@@ -240,22 +252,22 @@ func (c *collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<
}
}
func (c *collector) exportMTimes(mtimes map[string]time.Time, ch chan<- prometheus.Metric) {
func (c *Collector) exportMTimes(mTimes map[string]time.Time, ch chan<- prometheus.Metric) {
// Export the mtimes of the successful files.
if len(mtimes) > 0 {
if len(mTimes) > 0 {
// Sorting is needed for predictable output comparison in tests.
filenames := make([]string, 0, len(mtimes))
for filename := range mtimes {
filenames := make([]string, 0, len(mTimes))
for filename := range mTimes {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
for _, filename := range filenames {
mtime := float64(mtimes[filename].UnixNano() / 1e9)
if c.mtime != nil {
mtime = *c.mtime
mtime := float64(mTimes[filename].UnixNano() / 1e9)
if c.mTime != nil {
mtime = *c.mTime
}
ch <- prometheus.MustNewConstMetric(c.MtimeDesc, prometheus.GaugeValue, mtime, filename)
ch <- prometheus.MustNewConstMetric(c.mTimeDesc, prometheus.GaugeValue, mtime, filename)
}
}
}
@@ -264,7 +276,7 @@ type carriageReturnFilteringReader struct {
r io.Reader
}
// Read returns data from the underlying io.Reader, but with \r filtered out
// Read returns data from the underlying io.Reader, but with \r filtered out.
func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
buf := make([]byte, len(p))
n, err := cr.r.Read(buf)
@@ -274,7 +286,7 @@ func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
}
pi := 0
for i := 0; i < n; i++ {
for i := range n {
if buf[i] != '\r' {
p[pi] = buf[i]
pi++
@@ -285,16 +297,17 @@ func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
}
// Collect implements the Collector interface.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
errorMetric := 0.0
mtimes := map[string]time.Time{}
mTimes := map[string]time.Time{}
// Create empty metricFamily slice here and append parsedFamilies to it inside the loop.
// Once loop is complete, raise error if any duplicates are present.
// This will ensure that duplicate metrics are correctly detected between multiple .prom files.
metricFamilies := []*dto.MetricFamily{}
var metricFamilies []*dto.MetricFamily
// Iterate over files and accumulate their metrics.
for _, directory := range strings.Split(c.directories, ",") {
for _, directory := range c.config.TextFileDirectories {
err := filepath.WalkDir(directory, func(path string, dirEntry os.DirEntry, err error) error {
if err != nil {
_ = level.Error(c.logger).Log("msg", "Error reading directory: "+path, "err", err)
@@ -315,18 +328,18 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
errorMetric = 1.0
return nil
}
if _, hasName := mtimes[fileInfo.Name()]; hasName {
if _, hasName := mTimes[fileInfo.Name()]; hasName {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Duplicate filename detected: %q. Skip File.", path))
errorMetric = 1.0
return nil
}
mtimes[fileInfo.Name()] = fileInfo.ModTime()
mTimes[fileInfo.Name()] = fileInfo.ModTime()
metricFamilies = append(metricFamilies, families_array...)
}
return nil
})
if err != nil && directory != "" {
_ = level.Error(c.logger).Log("msg", "Error reading textfile collector directory: "+c.directories, "err", err)
_ = level.Error(c.logger).Log("msg", "Error reading textfile Collector directory: "+directory, "err", err)
errorMetric = 1.0
}
}
@@ -341,7 +354,7 @@ func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
}
}
c.exportMTimes(mtimes, ch)
c.exportMTimes(mTimes, ch)
// Export if there were errors.
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
@@ -378,7 +391,7 @@ func scrapeFile(path string, log log.Logger) ([]*dto.MetricFamily, error) {
for _, mf := range parsedFamilies {
families_array = append(families_array, mf)
for _, m := range mf.Metric {
for _, m := range mf.GetMetric() {
if m.TimestampMs != nil {
return nil, errors.New("textfile contains unsupported client-side timestamps")
}
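
A rough sketch of how the reworked flag handling above could be exercised end to end. It is not part of this diff, and the program name and directory paths are assumptions. NewWithFlags registers the comma-separated collector.textfile.directories flag together with a kingpin action that splits the value into the []string config during parsing; the updated tests below instead pass the directory slice directly via textfile.Config:

package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kingpin/v2"
	"github.com/go-kit/log"
	"github.com/prometheus-community/windows_exporter/pkg/collector/textfile"
)

func main() {
	app := kingpin.New("textfile-sketch", "illustrative use of the textfile collector flag")
	c := textfile.NewWithFlags(app)

	// The action registered in NewWithFlags splits this comma-separated
	// value into the collector's []string TextFileDirectories config.
	if _, err := app.Parse([]string{"--collector.textfile.directories", `C:\metrics\dir1,C:\metrics\dir2`}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	c.SetLogger(log.NewLogfmtLogger(os.Stdout))
	if err := c.Build(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}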


@@ -10,6 +10,8 @@ import (
)
func TestCRFilter(t *testing.T) {
t.Parallel()
sr := strings.NewReader("line 1\r\nline 2")
cr := carriageReturnFilteringReader{r: sr}
b, err := io.ReadAll(cr)
@@ -23,6 +25,8 @@ func TestCRFilter(t *testing.T) {
}
func TestCheckBOM(t *testing.T) {
t.Parallel()
testdata := []struct {
encoding utfbom.Encoding
err string
@@ -49,6 +53,8 @@ func TestCheckBOM(t *testing.T) {
}
func TestDuplicateMetricEntry(t *testing.T) {
t.Parallel()
metric_name := "windows_sometest"
metric_help := "This is a Test."
metric_type := dto.MetricType_GAUGE


@@ -6,28 +6,27 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/collector"
"github.com/prometheus-community/windows_exporter/pkg/collector/textfile"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
)
var baseDir = "../../../tools/textfile-test"
func TestMultipleDirectories(t *testing.T) {
t.Parallel()
testDir := baseDir + "/multiple-dirs"
testDirs := fmt.Sprintf("%[1]s/dir1,%[1]s/dir2,%[1]s/dir3", testDir)
textfileCollector := textfile.New(log.NewLogfmtLogger(os.Stdout), &textfile.Config{
TextFileDirectories: testDirs,
textFileCollector := textfile.New(log.NewLogfmtLogger(os.Stdout), &textfile.Config{
TextFileDirectories: strings.Split(testDirs, ","),
})
collectors := collector.New(map[string]types.Collector{textfile.Name: textfileCollector})
collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
require.NoError(t, collectors.Build())
scrapeContext, err := collectors.PrepareScrapeContext()
@@ -48,7 +47,7 @@ func TestMultipleDirectories(t *testing.T) {
}
}()
err = textfileCollector.Collect(scrapeContext, metrics)
err = textFileCollector.Collect(scrapeContext, metrics)
if err != nil {
t.Errorf("Unexpected error %s", err)
}
@@ -61,12 +60,14 @@ func TestMultipleDirectories(t *testing.T) {
}
func TestDuplicateFileName(t *testing.T) {
t.Parallel()
testDir := baseDir + "/duplicate-filename"
textfileCollector := textfile.New(log.NewLogfmtLogger(os.Stdout), &textfile.Config{
TextFileDirectories: testDir,
textFileCollector := textfile.New(log.NewLogfmtLogger(os.Stdout), &textfile.Config{
TextFileDirectories: []string{testDir},
})
collectors := collector.New(map[string]types.Collector{textfile.Name: textfileCollector})
collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
require.NoError(t, collectors.Build())
scrapeContext, err := collectors.PrepareScrapeContext()
@@ -86,13 +87,15 @@ func TestDuplicateFileName(t *testing.T) {
got += metric.String()
}
}()
err = textfileCollector.Collect(scrapeContext, metrics)
err = textFileCollector.Collect(scrapeContext, metrics)
if err != nil {
t.Errorf("Unexpected error %s", err)
}
if !strings.Contains(got, "file") {
t.Errorf("Unexpected output %q", got)
}
if strings.Contains(got, "sub_file") {
t.Errorf("Unexpected output %q", got)
}


@@ -19,39 +19,52 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics.
type Collector struct {
config Config
logger log.Logger
PercentPassiveLimit *prometheus.Desc
Temperature *prometheus.Desc
ThrottleReasons *prometheus.Desc
percentPassiveLimit *prometheus.Desc
temperature *prometheus.Desc
throttleReasons *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.Temperature = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.temperature = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "temperature_celsius"),
"(Temperature)",
[]string{
@@ -59,7 +72,7 @@ func (c *collector) Build() error {
},
nil,
)
c.PercentPassiveLimit = prometheus.NewDesc(
c.percentPassiveLimit = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "percent_passive_limit"),
"(PercentPassiveLimit)",
[]string{
@@ -67,7 +80,7 @@ func (c *collector) Build() error {
},
nil,
)
c.ThrottleReasons = prometheus.NewDesc(
c.throttleReasons = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "throttle_reasons"),
"(ThrottleReasons)",
[]string{
@@ -80,7 +93,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting thermalzone metrics", "err", err)
return err
@@ -98,7 +111,7 @@ type Win32_PerfRawData_Counters_ThermalZoneInformation struct {
ThrottleReasons uint32
}
func (c *collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_Counters_ThermalZoneInformation
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -113,21 +126,21 @@ func (c *collector) collect(ch chan<- prometheus.Metric) error {
for _, info := range dst {
// Divide by 10 and subtract 273.15 to convert decikelvin to celsius
ch <- prometheus.MustNewConstMetric(
c.Temperature,
c.temperature,
prometheus.GaugeValue,
(float64(info.HighPrecisionTemperature)/10.0)-273.15,
info.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PercentPassiveLimit,
c.percentPassiveLimit,
prometheus.GaugeValue,
float64(info.PercentPassiveLimit),
info.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ThrottleReasons,
c.throttleReasons,
prometheus.GaugeValue,
float64(info.ThrottleReasons),
info.Name,
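
As a quick worked example of the decikelvin conversion in the hunk above (a standalone sketch, not part of this diff; the sample reading is an assumed value):

package main

import "fmt"

func main() {
	// HighPrecisionTemperature is reported in tenths of a kelvin (decikelvin).
	// 2981 decikelvin = 298.1 K; 298.1 K - 273.15 = 24.95 °C.
	highPrecisionTemperature := uint32(2981)
	celsius := (float64(highPrecisionTemperature) / 10.0) - 273.15
	fmt.Printf("%.2f °C\n", celsius) // prints 24.95 °C
}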


@@ -20,76 +20,89 @@ type Config struct{}
var ConfigDefaults = Config{}
// collector is a Prometheus collector for Perflib counter metrics
type collector struct {
// Collector is a Prometheus Collector for Perflib counter metrics.
type Collector struct {
config Config
logger log.Logger
ClockFrequencyAdjustmentPPBTotal *prometheus.Desc
ComputedTimeOffset *prometheus.Desc
NTPClientTimeSourceCount *prometheus.Desc
NTPRoundtripDelay *prometheus.Desc
NTPServerIncomingRequestsTotal *prometheus.Desc
NTPServerOutgoingResponsesTotal *prometheus.Desc
clockFrequencyAdjustmentPPBTotal *prometheus.Desc
computedTimeOffset *prometheus.Desc
ntpClientTimeSourceCount *prometheus.Desc
ntpRoundTripDelay *prometheus.Desc
ntpServerIncomingRequestsTotal *prometheus.Desc
ntpServerOutgoingResponsesTotal *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Windows Time Service"}, nil
}
func (c *collector) Build() error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
if winversion.WindowsVersionFloat <= 6.1 {
return errors.New("Windows version older than Server 2016 detected. The time collector will not run and should be disabled via CLI flags or configuration file")
}
c.ClockFrequencyAdjustmentPPBTotal = prometheus.NewDesc(
c.clockFrequencyAdjustmentPPBTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "clock_frequency_adjustment_ppb_total"),
"Total adjustment made to the local system clock frequency by W32Time in Parts Per Billion (PPB) units.",
nil,
nil,
)
c.ComputedTimeOffset = prometheus.NewDesc(
c.computedTimeOffset = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "computed_time_offset_seconds"),
"Absolute time offset between the system clock and the chosen time source, in seconds",
nil,
nil,
)
c.NTPClientTimeSourceCount = prometheus.NewDesc(
c.ntpClientTimeSourceCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ntp_client_time_sources"),
"Active number of NTP Time sources being used by the client",
nil,
nil,
)
c.NTPRoundtripDelay = prometheus.NewDesc(
c.ntpRoundTripDelay = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ntp_round_trip_delay_seconds"),
"Roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds",
nil,
nil,
)
c.NTPServerOutgoingResponsesTotal = prometheus.NewDesc(
c.ntpServerOutgoingResponsesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ntp_server_outgoing_responses_total"),
"Total number of requests responded to by NTP server",
nil,
nil,
)
c.NTPServerIncomingRequestsTotal = prometheus.NewDesc(
c.ntpServerIncomingRequestsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ntp_server_incoming_requests_total"),
"Total number of requests received by NTP server",
nil,
@@ -100,7 +113,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting time metrics", "err", err)
return err
@@ -108,7 +121,7 @@ func (c *collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
return nil
}
// Perflib "Windows Time Service"
// Perflib "Windows Time Service".
type windowsTime struct {
ClockFrequencyAdjustmentPPBTotal float64 `perflib:"Clock Frequency Adjustment (ppb)"`
ComputedTimeOffset float64 `perflib:"Computed Time Offset"`
@@ -118,39 +131,39 @@ type windowsTime struct {
NTPServerOutgoingResponsesTotal float64 `perflib:"NTP Server Outgoing Responses"`
}
func (c *collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []windowsTime // Single-instance class, array is required but will have single entry.
if err := perflib.UnmarshalObject(ctx.PerfObjects["Windows Time Service"], &dst, c.logger); err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.ClockFrequencyAdjustmentPPBTotal,
c.clockFrequencyAdjustmentPPBTotal,
prometheus.CounterValue,
dst[0].ClockFrequencyAdjustmentPPBTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ComputedTimeOffset,
c.computedTimeOffset,
prometheus.GaugeValue,
dst[0].ComputedTimeOffset/1000000, // microseconds -> seconds
)
ch <- prometheus.MustNewConstMetric(
c.NTPClientTimeSourceCount,
c.ntpClientTimeSourceCount,
prometheus.GaugeValue,
dst[0].NTPClientTimeSourceCount,
)
ch <- prometheus.MustNewConstMetric(
c.NTPRoundtripDelay,
c.ntpRoundTripDelay,
prometheus.GaugeValue,
dst[0].NTPRoundtripDelay/1000000, // microseconds -> seconds
)
ch <- prometheus.MustNewConstMetric(
c.NTPServerIncomingRequestsTotal,
c.ntpServerIncomingRequestsTotal,
prometheus.CounterValue,
dst[0].NTPServerIncomingRequestsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.NTPServerOutgoingResponsesTotal,
c.ntpServerOutgoingResponsesTotal,
prometheus.CounterValue,
dst[0].NTPServerOutgoingResponsesTotal,
)

pkg/collector/types.go (new file)

@@ -0,0 +1,36 @@
package collector
import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
)
type Collectors struct {
logger log.Logger
collectors Map
perfCounterQuery string
}
type Map map[string]Collector
type (
Builder func(logger log.Logger) Collector
BuilderWithFlags[C Collector] func(*kingpin.Application) C
)
// Collector interface that a collector has to implement.
type Collector interface {
Build() error
// Close closes the collector
Close() error
// GetName get the name of the collector
GetName() string
// GetPerfCounter returns the perf counter required by the collector
GetPerfCounter() ([]string, error)
// Collect Get new metrics and expose them via prometheus registry.
Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (err error)
SetLogger(logger log.Logger)
}
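
To make the new interface concrete, here is a minimal no-op implementation sketch. It is purely illustrative and not part of this diff; the package name and Name constant are assumptions. The concrete collectors touched by this change (thermalzone, time, vmware, and so on) follow the same shape, with Build wiring up prometheus.Desc values and Collect emitting the metrics:

package noop

import (
	"github.com/alecthomas/kingpin/v2"
	"github.com/go-kit/log"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
)

const Name = "noop"

type Collector struct {
	logger log.Logger
}

func NewWithFlags(_ *kingpin.Application) *Collector { return &Collector{} }

func (c *Collector) GetName() string { return Name }

func (c *Collector) SetLogger(logger log.Logger) {
	c.logger = log.With(logger, "collector", Name)
}

func (c *Collector) GetPerfCounter() ([]string, error) { return []string{}, nil }

func (c *Collector) Build() error { return nil }

func (c *Collector) Close() error { return nil }

func (c *Collector) Collect(_ *types.ScrapeContext, _ chan<- prometheus.Metric) error {
	// A real collector would send its metrics on the channel here.
	return nil
}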


@@ -20,165 +20,178 @@ type Config struct{}
var ConfigDefaults = Config{}
// A collector is a Prometheus collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics
type collector struct {
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics.
type Collector struct {
config Config
logger log.Logger
MemActive *prometheus.Desc
MemBallooned *prometheus.Desc
MemLimit *prometheus.Desc
MemMapped *prometheus.Desc
MemOverhead *prometheus.Desc
MemReservation *prometheus.Desc
MemShared *prometheus.Desc
MemSharedSaved *prometheus.Desc
MemShares *prometheus.Desc
MemSwapped *prometheus.Desc
MemTargetSize *prometheus.Desc
MemUsed *prometheus.Desc
memActive *prometheus.Desc
memBallooned *prometheus.Desc
memLimit *prometheus.Desc
memMapped *prometheus.Desc
memOverhead *prometheus.Desc
memReservation *prometheus.Desc
memShared *prometheus.Desc
memSharedSaved *prometheus.Desc
memShares *prometheus.Desc
memSwapped *prometheus.Desc
memTargetSize *prometheus.Desc
memUsed *prometheus.Desc
CpuLimitMHz *prometheus.Desc
CpuReservationMHz *prometheus.Desc
CpuShares *prometheus.Desc
CpuStolenTotal *prometheus.Desc
CpuTimeTotal *prometheus.Desc
EffectiveVMSpeedMHz *prometheus.Desc
HostProcessorSpeedMHz *prometheus.Desc
cpuLimitMHz *prometheus.Desc
cpuReservationMHz *prometheus.Desc
cpuShares *prometheus.Desc
cpuStolenTotal *prometheus.Desc
cpuTimeTotal *prometheus.Desc
effectiveVMSpeedMHz *prometheus.Desc
hostProcessorSpeedMHz *prometheus.Desc
}
func New(logger log.Logger, _ *Config) types.Collector {
c := &collector{}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) types.Collector {
return &collector{}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *collector) GetName() string {
func (c *Collector) GetName() string {
return Name
}
func (c *collector) SetLogger(logger log.Logger) {
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *collector) GetPerfCounter() ([]string, error) {
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil
}
func (c *collector) Build() error {
c.MemActive = prometheus.NewDesc(
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.memActive = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_active_bytes"),
"(MemActiveMB)",
nil,
nil,
)
c.MemBallooned = prometheus.NewDesc(
c.memBallooned = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_ballooned_bytes"),
"(MemBalloonedMB)",
nil,
nil,
)
c.MemLimit = prometheus.NewDesc(
c.memLimit = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_limit_bytes"),
"(MemLimitMB)",
nil,
nil,
)
c.MemMapped = prometheus.NewDesc(
c.memMapped = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_mapped_bytes"),
"(MemMappedMB)",
nil,
nil,
)
c.MemOverhead = prometheus.NewDesc(
c.memOverhead = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_overhead_bytes"),
"(MemOverheadMB)",
nil,
nil,
)
c.MemReservation = prometheus.NewDesc(
c.memReservation = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_reservation_bytes"),
"(MemReservationMB)",
nil,
nil,
)
c.MemShared = prometheus.NewDesc(
c.memShared = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_shared_bytes"),
"(MemSharedMB)",
nil,
nil,
)
c.MemSharedSaved = prometheus.NewDesc(
c.memSharedSaved = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_shared_saved_bytes"),
"(MemSharedSavedMB)",
nil,
nil,
)
c.MemShares = prometheus.NewDesc(
c.memShares = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_shares"),
"(MemShares)",
nil,
nil,
)
c.MemSwapped = prometheus.NewDesc(
c.memSwapped = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_swapped_bytes"),
"(MemSwappedMB)",
nil,
nil,
)
c.MemTargetSize = prometheus.NewDesc(
c.memTargetSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_target_size_bytes"),
"(MemTargetSizeMB)",
nil,
nil,
)
c.MemUsed = prometheus.NewDesc(
c.memUsed = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_used_bytes"),
"(MemUsedMB)",
nil,
nil,
)
c.CpuLimitMHz = prometheus.NewDesc(
c.cpuLimitMHz = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_limit_mhz"),
"(CpuLimitMHz)",
nil,
nil,
)
c.CpuReservationMHz = prometheus.NewDesc(
c.cpuReservationMHz = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_reservation_mhz"),
"(CpuReservationMHz)",
nil,
nil,
)
c.CpuShares = prometheus.NewDesc(
c.cpuShares = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_shares"),
"(CpuShares)",
nil,
nil,
)
c.CpuStolenTotal = prometheus.NewDesc(
c.cpuStolenTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_stolen_seconds_total"),
"(CpuStolenMs)",
nil,
nil,
)
c.CpuTimeTotal = prometheus.NewDesc(
c.cpuTimeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_time_seconds_total"),
"(CpuTimePercents)",
nil,
nil,
)
c.EffectiveVMSpeedMHz = prometheus.NewDesc(
c.effectiveVMSpeedMHz = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "effective_vm_speed_mhz"),
"(EffectiveVMSpeedMHz)",
nil,
nil,
)
c.HostProcessorSpeedMHz = prometheus.NewDesc(
c.hostProcessorSpeedMHz = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "host_processor_speed_mhz"),
"(HostProcessorSpeedMHz)",
nil,
@@ -189,7 +202,7 @@ func (c *collector) Build() error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
if err := c.collectMem(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware memory metrics", "err", err)
return err
@@ -226,7 +239,7 @@ type Win32_PerfRawData_vmGuestLib_VCPU struct {
HostProcessorSpeedMHz uint64
}
func (c *collector) collectMem(ch chan<- prometheus.Metric) error {
func (c *Collector) collectMem(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_vmGuestLib_VMem
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -237,73 +250,73 @@ func (c *collector) collectMem(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.MemActive,
c.memActive,
prometheus.GaugeValue,
mbToBytes(dst[0].MemActiveMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemBallooned,
c.memBallooned,
prometheus.GaugeValue,
mbToBytes(dst[0].MemBalloonedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemLimit,
c.memLimit,
prometheus.GaugeValue,
mbToBytes(dst[0].MemLimitMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemMapped,
c.memMapped,
prometheus.GaugeValue,
mbToBytes(dst[0].MemMappedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemOverhead,
c.memOverhead,
prometheus.GaugeValue,
mbToBytes(dst[0].MemOverheadMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemReservation,
c.memReservation,
prometheus.GaugeValue,
mbToBytes(dst[0].MemReservationMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemShared,
c.memShared,
prometheus.GaugeValue,
mbToBytes(dst[0].MemSharedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemSharedSaved,
c.memSharedSaved,
prometheus.GaugeValue,
mbToBytes(dst[0].MemSharedSavedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemShares,
c.memShares,
prometheus.GaugeValue,
float64(dst[0].MemShares),
)
ch <- prometheus.MustNewConstMetric(
c.MemSwapped,
c.memSwapped,
prometheus.GaugeValue,
mbToBytes(dst[0].MemSwappedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemTargetSize,
c.memTargetSize,
prometheus.GaugeValue,
mbToBytes(dst[0].MemTargetSizeMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemUsed,
c.memUsed,
prometheus.GaugeValue,
mbToBytes(dst[0].MemUsedMB),
)
@@ -315,7 +328,7 @@ func mbToBytes(mb uint64) float64 {
return float64(mb * 1024 * 1024)
}
func (c *collector) collectCpu(ch chan<- prometheus.Metric) error {
func (c *Collector) collectCpu(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_vmGuestLib_VCPU
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.Query(q, &dst); err != nil {
@@ -326,43 +339,43 @@ func (c *collector) collectCpu(ch chan<- prometheus.Metric) error {
}
ch <- prometheus.MustNewConstMetric(
c.CpuLimitMHz,
c.cpuLimitMHz,
prometheus.GaugeValue,
float64(dst[0].CpuLimitMHz),
)
ch <- prometheus.MustNewConstMetric(
c.CpuReservationMHz,
c.cpuReservationMHz,
prometheus.GaugeValue,
float64(dst[0].CpuReservationMHz),
)
ch <- prometheus.MustNewConstMetric(
c.CpuShares,
c.cpuShares,
prometheus.GaugeValue,
float64(dst[0].CpuShares),
)
ch <- prometheus.MustNewConstMetric(
c.CpuStolenTotal,
c.cpuStolenTotal,
prometheus.CounterValue,
float64(dst[0].CpuStolenMs)*perflib.TicksToSecondScaleFactor,
)
ch <- prometheus.MustNewConstMetric(
c.CpuTimeTotal,
c.cpuTimeTotal,
prometheus.CounterValue,
float64(dst[0].CpuTimePercents)*perflib.TicksToSecondScaleFactor,
)
ch <- prometheus.MustNewConstMetric(
c.EffectiveVMSpeedMHz,
c.effectiveVMSpeedMHz,
prometheus.GaugeValue,
float64(dst[0].EffectiveVMSpeedMHz),
)
ch <- prometheus.MustNewConstMetric(
c.HostProcessorSpeedMHz,
c.hostProcessorSpeedMHz,
prometheus.GaugeValue,
float64(dst[0].HostProcessorSpeedMHz),
)
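
The renamed descriptors above feed metrics whose names end in _bytes and _seconds_total, while the underlying Win32_PerfRawData_vmGuestLib_* classes report megabytes (the CPU counters are rescaled to seconds via perflib.TicksToSecondScaleFactor), so mbToBytes keeps the exporter on Prometheus base units. A standalone sketch of the MB-to-bytes conversion, outside the exporter's packages:

package main

import "fmt"

// mbToBytes mirrors the helper shown in the hunk above: WMI reports values
// such as MemActiveMB in megabytes, the exporter exposes bytes.
func mbToBytes(mb uint64) float64 {
	return float64(mb * 1024 * 1024)
}

func main() {
	fmt.Println(mbToBytes(4096)) // 4.294967296e+09, i.e. 4096 MB expressed in bytes
}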

File diff suppressed because it is too large.


@@ -37,44 +37,29 @@ type Resolver struct {
}
// NewResolver returns a Resolver structure.
func NewResolver(file string, logger log.Logger, insecure_skip_verify bool) (*Resolver, error) {
func NewResolver(file string, logger log.Logger, insecureSkipVerify bool) (*Resolver, error) {
flags := map[string]string{}
var fileBytes []byte
var err error
if strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://") {
_ = level.Info(logger).Log("msg", fmt.Sprintf("Loading configuration file from URL: %v", file))
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure_skip_verify},
}
if insecure_skip_verify {
_ = level.Warn(logger).Log("msg", "Loading configuration file with TLS verification disabled")
}
client := &http.Client{Transport: tr}
resp, err := client.Get(file)
if err != nil {
return nil, err
}
defer resp.Body.Close()
fileBytes, err = io.ReadAll(resp.Body)
fileBytes, err = readFromURL(file, logger, insecureSkipVerify)
if err != nil {
return nil, err
}
} else {
_ = level.Info(logger).Log("msg", fmt.Sprintf("Loading configuration file: %v", file))
if _, err := os.Stat(file); err != nil {
return nil, err
}
fileBytes, err = os.ReadFile(file)
fileBytes, err = readFromFile(file, logger)
if err != nil {
return nil, err
}
}
var rawValues map[string]interface{}
err = yaml.Unmarshal(fileBytes, &rawValues)
if err != nil {
return nil, err
}
// Flatten nested YAML values
flattenedValues := flatten(rawValues)
for k, v := range flattenedValues {
@@ -82,9 +67,51 @@ func NewResolver(file string, logger log.Logger, insecure_skip_verify bool) (*Re
flags[k] = v
}
}
return &Resolver{flags: flags}, nil
}
func readFromFile(file string, logger log.Logger) ([]byte, error) {
_ = level.Info(logger).Log("msg", fmt.Sprintf("Loading configuration file: %v", file))
if _, err := os.Stat(file); err != nil {
return nil, err
}
fileBytes, err := os.ReadFile(file)
if err != nil {
return nil, err
}
return fileBytes, err
}
func readFromURL(file string, logger log.Logger, insecureSkipVerify bool) ([]byte, error) {
_ = level.Info(logger).Log("msg", fmt.Sprintf("Loading configuration file from URL: %v", file))
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify}, //nolint:gosec
}
if insecureSkipVerify {
_ = level.Warn(logger).Log("msg", "Loading configuration file with TLS verification disabled")
}
client := &http.Client{Transport: tr}
resp, err := client.Get(file)
if err != nil {
return nil, err
}
defer resp.Body.Close()
fileBytes, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return fileBytes, nil
}
func (c *Resolver) setDefault(v getFlagger) {
for name, value := range c.flags {
f := v.GetFlag(name)
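
The refactor above extracts readFromFile and readFromURL but leaves NewResolver's contract unchanged: it accepts either a local path or an http(s) URL, unmarshals the YAML, and flattens it into flag defaults. A minimal caller sketch, assuming the exporter's config package import path (the signature itself is the one shown above):

package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/prometheus-community/windows_exporter/pkg/config" // assumed import path
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	// A local file works the same way as an https:// URL; the last argument
	// only matters for URLs and disables TLS verification when true.
	resolver, err := config.NewResolver("config.yaml", logger, false)
	if err != nil {
		panic(err)
	}
	_ = resolver // typically passed on to set kingpin flag defaults
}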


@@ -9,7 +9,7 @@ import (
//
// All keys will be joined by dot
// e.g. {"a": {"b":"c"}} => {"a.b":"c"}
// or {"a": {"b":[1,2]}} => {"a.b.0":1, "a.b.1": 2}
// or {"a": {"b":[1,2]}} => {"a.b.0":1, "a.b.1": 2}.
func flatten(data map[string]interface{}) map[string]string {
ret := make(map[string]string)
for k, v := range data {
@@ -32,6 +32,7 @@ func flatten(data map[string]interface{}) map[string]string {
}
return ret
}
func flattenSlice(data []interface{}) map[string]string {
ret := make(map[string]string)
for idx, v := range data {
@@ -58,7 +59,9 @@ func flattenSlice(data []interface{}) map[string]string {
func convertMap(originalMap map[interface{}]interface{}) map[string]interface{} {
convertedMap := map[string]interface{}{}
for key, value := range originalMap {
convertedMap[key.(string)] = value
if keyString, ok := key.(string); ok {
convertedMap[keyString] = value
}
}
return convertedMap
}
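
Since flatten and convertMap are unexported, the documented behaviour is easiest to show from a test in the same package. A sketch, driven through yaml.v3 just like the existing test that follows, covering the first example from the comment ({"a": {"b":"c"}} => {"a.b":"c"}); the test name is hypothetical, and non-string values are left out because their exact string form is not shown in the hunks above:

package config

import (
	"testing"

	"gopkg.in/yaml.v3"
)

// TestFlattenDocExample is a hypothetical companion test for the flatten doc comment.
func TestFlattenDocExample(t *testing.T) {
	t.Parallel()

	var raw map[string]interface{}
	if err := yaml.Unmarshal([]byte("a:\n  b: c\n"), &raw); err != nil {
		t.Fatal(err)
	}

	got := flatten(raw)
	if got["a.b"] != "c" {
		t.Errorf(`expected "a.b" to flatten to "c", got %q`, got["a.b"])
	}
}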


@@ -7,8 +7,10 @@ import (
"gopkg.in/yaml.v3"
)
// Unmarshal good configuration file and confirm data is flattened correctly
// Unmarshal good configuration file and confirm data is flattened correctly.
func TestConfigFlattening(t *testing.T) {
t.Parallel()
goodYamlConfig := []byte(`---
collectors:


@@ -19,7 +19,7 @@ type wKSTAInfo102 struct {
wki102_logged_on_users uint32
}
// WorkstationInfo is an idiomatic wrapper of WKSTAInfo102
// WorkstationInfo is an idiomatic wrapper of WKSTAInfo102.
type WorkstationInfo struct {
PlatformId uint32
ComputerName string
@@ -89,7 +89,7 @@ func netWkstaGetInfo() (wKSTAInfo102, uint32, error) {
return deref, 0, nil
}
// GetWorkstationInfo is an idiomatic wrapper for netWkstaGetInfo
// GetWorkstationInfo is an idiomatic wrapper for netWkstaGetInfo.
func GetWorkstationInfo() (WorkstationInfo, error) {
info, _, err := netWkstaGetInfo()
if err != nil {

View File

@@ -24,7 +24,7 @@ const (
SL_GEN_STATE_LAST
)
// SLIsWindowsGenuineLocal function wrapper
// SLIsWindowsGenuineLocal function wrapper.
func SLIsWindowsGenuineLocal() (SL_GENUINE_STATE, error) {
var genuineState SL_GENUINE_STATE


@@ -21,7 +21,7 @@ type memoryStatusEx struct {
UllAvailExtendedVirtual uint64
}
// MemoryStatus is an idiomatic wrapper for MemoryStatusEx
// MemoryStatus is an idiomatic wrapper for MemoryStatusEx.
type MemoryStatus struct {
MemoryLoad uint32
TotalPhys uint64
@@ -40,17 +40,17 @@ type wProcessorArchitecture struct {
WReserved uint16
}
// ProcessorArchitecture is an idiomatic wrapper for wProcessorArchitecture
// ProcessorArchitecture is an idiomatic wrapper for wProcessorArchitecture.
type ProcessorArchitecture uint16
// Idiomatic values for wProcessorArchitecture
// Idiomatic values for wProcessorArchitecture.
const (
AMD64 ProcessorArchitecture = 9
ARM = 5
ARM64 = 12
IA64 = 6
INTEL = 0
UNKNOWN = 0xffff
ARM ProcessorArchitecture = 5
ARM64 ProcessorArchitecture = 12
IA64 ProcessorArchitecture = 6
INTEL ProcessorArchitecture = 0
UNKNOWN ProcessorArchitecture = 0xffff
)
// LpSystemInfo is a wrapper for LPSYSTEM_INFO
@@ -68,7 +68,7 @@ type lpSystemInfo struct {
WProcessorRevision uint16
}
// SystemInfo is an idiomatic wrapper for LpSystemInfo
// SystemInfo is an idiomatic wrapper for LpSystemInfo.
type SystemInfo struct {
Arch ProcessorArchitecture
PageSize uint32
@@ -82,10 +82,10 @@ type SystemInfo struct {
ProcessorRevision uint16
}
// WinComputerNameFormat is a wrapper for COMPUTER_NAME_FORMAT
// WinComputerNameFormat is a wrapper for COMPUTER_NAME_FORMAT.
type WinComputerNameFormat int
// Definitions for WinComputerNameFormat constants
// Definitions for WinComputerNameFormat constants.
const (
ComputerNameNetBIOS WinComputerNameFormat = iota
ComputerNameDNSHostname
@@ -112,7 +112,7 @@ func GlobalMemoryStatusEx() (MemoryStatus, error) {
mse.dwLength = (uint32)(unsafe.Sizeof(mse))
r1, _, err := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&mse)))
if ret := *(*bool)(unsafe.Pointer(&r1)); ret == false {
if ret := *(*bool)(unsafe.Pointer(&r1)); !ret {
return MemoryStatus{}, err
}
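
The constant block change above is more than formatting: with ARM = 5 the identifier is an untyped constant whose default type is int, whereas ARM ProcessorArchitecture = 5 gives every value the named type, so type inference behaves consistently across the block. A standalone sketch (not the exporter's package) of the difference:

package main

import "fmt"

type ProcessorArchitecture uint16

const (
	AMD64 ProcessorArchitecture = 9
	ARM                         = 5  // untyped, as in the old code: defaults to int
	ARM64 ProcessorArchitecture = 12 // typed, as in the updated code
)

func main() {
	a := ARM   // a is an int
	b := ARM64 // b is a ProcessorArchitecture
	fmt.Printf("%T %T\n", a, b) // prints: int main.ProcessorArchitecture
}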


@@ -12,7 +12,7 @@ import (
type WTSTypeClass int
// The valid values for the WTSTypeClass enumeration
// The valid values for the WTSTypeClass enumeration.
const (
WTSTypeProcessInfoLevel0 WTSTypeClass = iota
WTSTypeProcessInfoLevel1
@@ -129,19 +129,20 @@ func WTSOpenServer(server string) (syscall.Handle, error) {
}
func WTSCloseServer(server syscall.Handle) error {
_, _, err := procWTSCloseServer.Call(uintptr(server))
if err != nil {
r1, _, err := procWTSCloseServer.Call(uintptr(server))
if r1 != 1 {
return fmt.Errorf("failed to close server: %w", err)
}
return err
return nil
}
func WTSFreeMemoryEx(class WTSTypeClass, pMemory uintptr, NumberOfEntries uint32) error {
func WTSFreeMemoryEx(class WTSTypeClass, pMemory uintptr, numberOfEntries uint32) error {
r1, _, err := procWTSFreeMemoryEx.Call(
uintptr(class),
pMemory,
uintptr(NumberOfEntries),
uintptr(numberOfEntries),
)
if r1 != 1 {
@@ -181,7 +182,7 @@ func WTSEnumerateSessionsEx(server syscall.Handle, logger log.Logger) ([]WTSSess
sessionSize := unsafe.Sizeof(sizeTest)
sessions := make([]WTSSession, 0, count)
for i := uint32(0); i < count; i++ {
for i := range count {
curPtr := unsafe.Pointer(sessionInfoPointer + (uintptr(i) * sessionSize))
data := (*wtsSessionInfo1)(curPtr)
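
The WTSCloseServer fix above follows from how the Call method on syscall.(Lazy)Proc reports errors: the error return is always non-nil (it carries the GetLastError value, which on success typically reads "The operation completed successfully"), so failure has to be detected from the primary return value, here r1 != 1. A generic, standalone sketch of the pattern, using kernel32's Beep as a stand-in for a BOOL-returning Win32 API:

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

var (
	kernel32 = syscall.NewLazyDLL("kernel32.dll")
	procBeep = kernel32.NewProc("Beep")
)

func beep(freqHz, durationMs uint32) error {
	r1, _, err := procBeep.Call(uintptr(freqHz), uintptr(durationMs))
	// err is never nil here; the BOOL result r1 decides success,
	// just as r1 != 1 does in the WTSCloseServer fix above.
	if r1 == 0 {
		return fmt.Errorf("Beep failed: %w", err)
	}
	return nil
}

func main() {
	if err := beep(750, 200); err != nil {
		fmt.Println(err)
	}
}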


@@ -17,31 +17,32 @@ type windowsExporterService struct{}
var logger *eventlog.Log
//nolint:nonamedreturns
func (s *windowsExporterService) Execute(_ []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) {
const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown
changes <- svc.Status{State: svc.StartPending}
changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}
loop:
for {
select {
case c := <-r:
switch c.Cmd {
case svc.Interrogate:
changes <- c.CurrentStatus
case svc.Stop, svc.Shutdown:
_ = logger.Info(100, "Service Stop Received")
changes <- svc.Status{State: svc.StopPending}
break loop
default:
_ = logger.Error(102, fmt.Sprintf("unexpected control request #%d", c))
}
for c := range r {
switch c.Cmd {
case svc.Interrogate:
changes <- c.CurrentStatus
case svc.Stop, svc.Shutdown:
_ = logger.Info(100, "Service Stop Received")
changes <- svc.Status{State: svc.StopPending}
return
default:
_ = logger.Error(102, fmt.Sprintf("unexpected control request #%d", c))
}
}
return
}
var StopCh = make(chan bool)
//nolint:gochecknoinits
func init() {
isService, err := svc.IsWindowsService()
if err != nil {


@@ -6,6 +6,7 @@ package eventlog
import (
"bytes"
"errors"
"fmt"
"io"
"sync"
@@ -54,7 +55,11 @@ type eventlogLogger struct {
func (l *eventlogLogger) Log(keyvals ...interface{}) error {
priority := l.prioritySelector(keyvals...)
lb := l.getLoggerBuf()
lb, err := l.getLoggerBuf()
if err != nil {
return err
}
defer l.putLoggerBuf(lb)
if err := lb.logger.Log(keyvals...); err != nil {
return err
@@ -65,7 +70,7 @@ func (l *eventlogLogger) Log(keyvals ...interface{}) error {
msg, err := syscall.UTF16PtrFromString(lb.buf.String())
if err != nil {
return fmt.Errorf("error convert string to UTF-16: %v", err)
return fmt.Errorf("error convert string to UTF-16: %w", err)
}
ss := []*uint16{msg, nil, nil, nil, nil, nil, nil, nil, nil}
@@ -77,15 +82,19 @@ type loggerBuf struct {
logger log.Logger
}
func (l *eventlogLogger) getLoggerBuf() *loggerBuf {
lb := l.bufPool.Get().(*loggerBuf)
func (l *eventlogLogger) getLoggerBuf() (*loggerBuf, error) {
lb, ok := l.bufPool.Get().(*loggerBuf)
if !ok {
return nil, errors.New("failed to get loggerBuf from pool")
}
if lb.buf == nil {
lb.buf = &bytes.Buffer{}
lb.logger = l.newLogger(lb.buf)
} else {
lb.buf.Reset()
}
return lb
return lb, nil
}
func (l *eventlogLogger) putLoggerBuf(lb *loggerBuf) {
@@ -95,7 +104,7 @@ func (l *eventlogLogger) putLoggerBuf(lb *loggerBuf) {
// PrioritySelector inspects the list of keyvals and selects an eventlog priority.
type PrioritySelector func(keyvals ...interface{}) Priority
// defaultPrioritySelector convert a kit/log level into a Windows Eventlog level
// defaultPrioritySelector convert a kit/log level into a Windows Eventlog level.
func defaultPrioritySelector(keyvals ...interface{}) Priority {
l := len(keyvals)
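
getLoggerBuf now uses the comma-ok form of the type assertion on the sync.Pool and returns an error instead of panicking on an unexpected pooled type, the kind of unchecked assertion that stricter linters reject. A standalone sketch of the same pattern:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}

func getBuf() (*bytes.Buffer, error) {
	buf, ok := bufPool.Get().(*bytes.Buffer)
	if !ok {
		// With a matching New func this cannot normally happen, but the
		// comma-ok form turns a would-be panic into an error, as in
		// getLoggerBuf above.
		return nil, errors.New("unexpected type in buffer pool")
	}
	buf.Reset()
	return buf, nil
}

func main() {
	buf, err := getBuf()
	if err != nil {
		panic(err)
	}
	buf.WriteString("hello")
	fmt.Println(buf.String())
	bufPool.Put(buf)
}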


@@ -20,14 +20,14 @@ import (
promlogflag "github.com/prometheus/common/promlog/flag"
)
// FileFlagName is the canonical flag name to configure the log file
// FileFlagName is the canonical flag name to configure the log file.
const FileFlagName = "log.file"
// FileFlagHelp is the help description for the log.file flag.
const FileFlagHelp = "Output file of log messages. One of [stdout, stderr, eventlog, <path to log file>]"
// AddFlags adds the flags used by this package to the Kingpin application.
// To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
// To use the default Kingpin application, call AddFlags(kingpin.CommandLine).
func AddFlags(a *kingpin.Application, config *log.Config) {
config.Level = &promlog.AllowedLevel{}
a.Flag(promlogflag.LevelFlagName, promlogflag.LevelFlagHelp).

Some files were not shown because too many files have changed in this diff.