Compare commits

...

16 Commits

Author SHA1 Message Date
Calle Pettersson
f8f34ab983 Merge pull request #529 from martinlindhe/check-collector-exists
Check that collectors given on --collectors.enabled exist before trying to construct them
2020-05-24 20:04:02 +02:00
Calle Pettersson
5052f4d00e Merge pull request #500 from martinlindhe/update-template
Update collector generator template
2020-05-24 20:03:44 +02:00
rmyhren
3c98edaa2c Adds MS Exchange collector (#508)
Adds MS Exchange collector

Co-authored-by: Robin Eikaas <robin@eika.as>
Co-authored-by: Robert Myhren <robert.myhren@skatteetaten.no>
2020-05-24 18:05:27 +02:00
Calle Pettersson
2ea20ff628 Check that collectors given on --collectors.enabled exist before trying to construct them 2020-05-24 17:23:13 +02:00
Calle Pettersson
245b2b4d28 Merge pull request #523 from basroovers/master
Replace Get-WMIObject with Get-CimInstance for PS7 support
2020-05-20 09:24:16 +02:00
Bas Roovers
7742ff6a1c Replaced Get-WMIObject with Get-CimInstance for PS7 support 2020-05-20 08:48:53 +02:00
Guillermo Sanchez Gavier
99ed969bf7 add wmi_service_info metric with display_name and pid labels (#516)
* add wmi_service_info metric
2020-05-15 13:13:25 +02:00
Calle Pettersson
54d94c261b Merge pull request #493 from breed808/dhcp
Add dhcp collector
2020-05-04 09:25:18 +02:00
Ben Reedy
05d1e4bde9 Add dhcp collector
Unfortunately perflib does not expose scope statistics
(DhcpServerv4ScopeStatistics), and WMI queries to this class don't
appear to be possible with the stackexchange/wmi module.
2020-05-04 08:08:34 +10:00
Ashok Siyani
17324b9fc6 add Terminal service & RemoteFx Collector (#491) 2020-04-23 12:17:17 +02:00
Calle Pettersson
7890c9ce91 Merge pull request #506 from martinlindhe/fix-adfs-dependencies
adfs collector missing dependency
2020-04-19 21:51:47 +02:00
Calle Pettersson
bcb6f2b218 adfs collector missing dependency 2020-04-19 21:44:39 +02:00
Calle Pettersson
31605b5096 Update collector generator template 2020-04-07 21:05:41 +02:00
Calle Pettersson
91a64fecb8 Merge pull request #498 from Mario-Hofstaetter/master
Fix README for process whitelist and expand docs
2020-04-04 15:15:54 +02:00
Mario Hofstätter
9148728b87 Expand process collector docs to show more regexp (#497) 2020-04-03 21:05:05 +02:00
Mario Hofstätter
2290969596 Fix README to use new --collector.process.whitelist (#497)
With PR #489 `--collector.process.processes-where` no longer works, changing example to use `--collector.process.whitelist` with regexp
2020-04-03 20:49:11 +02:00
18 changed files with 2035 additions and 52 deletions

README.md

@@ -15,6 +15,7 @@ Name | Description | Enabled by default
[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | &#10003;
[container](docs/collector.container.md) | Container metrics |
[dns](docs/collector.dns.md) | DNS Server |
[exchange](docs/collector.exchange.md) | Exchange metrics |
[hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
[iis](docs/collector.iis.md) | IIS sites and applications |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | &#10003;
@@ -33,10 +34,12 @@ Name | Description | Enabled by default
[net](docs/collector.net.md) | Network interface I/O | &#10003;
[os](docs/collector.os.md) | OS metrics (memory, processes, users) | &#10003;
[process](docs/collector.process.md) | Per-process metrics |
[remote_fx](docs/collector.remote_fx.md) | RemoteFX protocol (RDP) metrics |
[service](docs/collector.service.md) | Service state metrics | &#10003;
[system](docs/collector.system.md) | System calls | &#10003;
[tcp](docs/collector.tcp.md) | TCP connections |
[thermalzone](docs/collector.thermalzone.md) | Thermal information
[terminal_services](docs/collector.terminal_services.md) | Terminal services (RDS)
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | &#10003;
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |
@@ -97,11 +100,9 @@ The prometheus metrics will be exposed on [localhost:9182](http://localhost:9182
### Enable only process collector and specify a custom query
.\wmi_exporter.exe --collectors.enabled "process" --collector.process.processes-where "Name LIKE 'firefox%'"
.\wmi_exporter.exe --collectors.enabled "process" --collector.process.whitelist="firefox.+"
When there are multiple processes with the same name, WMI represents those after the first instance as `process-name#index`. So to get them all, rather than just the first one, the query needs to be a wildcard search using a `%` character.
Please note that in Windows batch scripts (and when using the `cmd` command prompt), the `%` character is reserved, so it has to be escaped with another `%`. For example, the wildcard syntax for searching for all firefox processes is `firefox%%`.
When there are multiple processes with the same name, WMI represents those after the first instance as `process-name#index`. So to get them all, rather than just the first one, the [regular expression](https://en.wikipedia.org/wiki/Regular_expression) must use `.+`. See [process](docs/collector.process.md) for more information.
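To make the regexp point above concrete, here is a minimal, self-contained Go sketch (illustration only, not exporter code; the anchored `^(?:...)$` wrapper and the sample process names are assumptions) showing that a whitelist such as `firefox.+` matches the indexed `process-name#index` instances WMI reports for duplicate processes:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical anchored form of the whitelist pattern from the example above.
	whitelist := regexp.MustCompile("^(?:firefox.+)$")

	// WMI names the second and later instances of a process "name#index".
	for _, name := range []string{"firefox#1", "firefox#2", "chrome"} {
		fmt.Printf("%-10s whitelisted=%v\n", name, whitelist.MatchString(name))
	}
}
```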
## License

collector/adfs.go

@@ -7,7 +7,7 @@ import (
)
func init() {
registerCollector("adfs", newADFSCollector)
registerCollector("adfs", newADFSCollector, "AD FS")
}
type adfsCollector struct {

collector/collector.go

@@ -1,6 +1,7 @@
package collector
import (
"fmt"
"strconv"
"strings"
@@ -72,7 +73,11 @@ func Available() []string {
return cs
}
func Build(collector string) (Collector, error) {
return builders[collector]()
builder, exists := builders[collector]
if !exists {
return nil, fmt.Errorf("Unknown collector %q", collector)
}
return builder()
}
func getPerfQuery(collectors []string) string {
parts := make([]string, 0, len(collectors))
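For context, the sketch below is an assumed simplification (not the exporter's actual collector.go) of the registration pattern these hunks rely on: each collector registers a builder plus the perflib objects it depends on, and `Build` now returns a descriptive error for unknown names instead of panicking on a missing map entry.

```go
package main

import "fmt"

// Collector stands in for the exporter's Collector interface.
type Collector interface{}

type builderFn func() (Collector, error)

var (
	builders    = map[string]builderFn{}
	perfDepends = map[string][]string{} // perflib objects each collector needs
)

// registerCollector records a builder and its perflib object dependencies,
// mirroring calls such as registerCollector("adfs", newADFSCollector, "AD FS").
func registerCollector(name string, builder builderFn, perfObjects ...string) {
	builders[name] = builder
	perfDepends[name] = perfObjects
}

// Build looks up a collector by name; unknown names yield an error.
func Build(name string) (Collector, error) {
	builder, exists := builders[name]
	if !exists {
		return nil, fmt.Errorf("Unknown collector %q", name)
	}
	return builder()
}

type adfsCollector struct{}

func main() {
	registerCollector("adfs", func() (Collector, error) { return &adfsCollector{}, nil }, "AD FS")

	// A valid name builds; a typo produces a clear error instead of a panic.
	for _, name := range []string{"adfs", "adsf"} {
		if _, err := Build(name); err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Println("built collector:", name)
	}
}
```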

collector/dhcp.go (new file, 387 lines)

@@ -0,0 +1,387 @@
// +build windows
package collector
import (
"github.com/prometheus/client_golang/prometheus"
)
func init() {
registerCollector("dhcp", NewDhcpCollector, "DHCP Server")
}
// A DhcpCollector is a Prometheus collector for perflib DHCP metrics
type DhcpCollector struct {
PacketsReceivedTotal *prometheus.Desc
DuplicatesDroppedTotal *prometheus.Desc
PacketsExpiredTotal *prometheus.Desc
ActiveQueueLength *prometheus.Desc
ConflictCheckQueueLength *prometheus.Desc
DiscoversTotal *prometheus.Desc
OffersTotal *prometheus.Desc
RequestsTotal *prometheus.Desc
InformsTotal *prometheus.Desc
AcksTotal *prometheus.Desc
NacksTotal *prometheus.Desc
DeclinesTotal *prometheus.Desc
ReleasesTotal *prometheus.Desc
OfferQueueLength *prometheus.Desc
DeniedDueToMatch *prometheus.Desc
DeniedDueToNonMatch *prometheus.Desc
FailoverBndupdSentTotal *prometheus.Desc
FailoverBndupdReceivedTotal *prometheus.Desc
FailoverBndackSentTotal *prometheus.Desc
FailoverBndackReceivedTotal *prometheus.Desc
FailoverBndupdPendingOutboundQueue *prometheus.Desc
FailoverTransitionsCommunicationinterruptedState *prometheus.Desc
FailoverTransitionsPartnerdownState *prometheus.Desc
FailoverTransitionsRecoverState *prometheus.Desc
FailoverBndupdDropped *prometheus.Desc
}
func NewDhcpCollector() (Collector, error) {
const subsystem = "dhcp"
return &DhcpCollector{
PacketsReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_received_total"),
"Total number of packets received by the DHCP server (PacketsReceivedTotal)",
nil,
nil,
),
DuplicatesDroppedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "duplicates_dropped_total"),
"Total number of duplicate packets received by the DHCP server (DuplicatesDroppedTotal)",
nil,
nil,
),
PacketsExpiredTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "packets_expired_total"),
"Total number of packets expired in the DHCP server message queue (PacketsExpiredTotal)",
nil,
nil,
),
ActiveQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "active_queue_length"),
"Number of packets in the processing queue of the DHCP server (ActiveQueueLength)",
nil,
nil,
),
ConflictCheckQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "conflict_check_queue_length"),
"Number of packets in the DHCP server queue waiting on conflict detection (ping). (ConflictCheckQueueLength)",
nil,
nil,
),
DiscoversTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "discovers_total"),
"Total DHCP Discovers received by the DHCP server (DiscoversTotal)",
nil,
nil,
),
OffersTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "offers_total"),
"Total DHCP Offers sent by the DHCP server (OffersTotal)",
nil,
nil,
),
RequestsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "requests_total"),
"Total DHCP Requests received by the DHCP server (RequestsTotal)",
nil,
nil,
),
InformsTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "informs_total"),
"Total DHCP Informs received by the DHCP server (InformsTotal)",
nil,
nil,
),
AcksTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "acks_total"),
"Total DHCP Acks sent by the DHCP server (AcksTotal)",
nil,
nil,
),
NacksTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "nacks_total"),
"Total DHCP Nacks sent by the DHCP server (NacksTotal)",
nil,
nil,
),
DeclinesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "declines_total"),
"Total DHCP Declines received by the DHCP server (DeclinesTotal)",
nil,
nil,
),
ReleasesTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "releases_total"),
"Total DHCP Releases received by the DHCP server (ReleasesTotal)",
nil,
nil,
),
OfferQueueLength: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "offer_queue_length"),
"Number of packets in the offer queue of the DHCP server (OfferQueueLength)",
nil,
nil,
),
DeniedDueToMatch: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "denied_due_to_match_total"),
"Total number of DHCP requests denied, based on matches from the Deny list (DeniedDueToMatch)",
nil,
nil,
),
DeniedDueToNonMatch: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "denied_due_to_match_total"),
"Total number of DHCP requests denied, based on non-matches from the Allow list (DeniedDueToNonMatch)",
nil,
nil,
),
FailoverBndupdSentTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_sent_total"),
"Number of DHCP failover Binding Update messages sent (FailoverBndupdSentTotal)",
nil,
nil,
),
FailoverBndupdReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_received_total"),
"Number of DHCP failover Binding Update messages received (FailoverBndupdReceivedTotal)",
nil,
nil,
),
FailoverBndackSentTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndack_sent_total"),
"Number of DHCP failover Binding Ack messages sent (FailoverBndackSentTotal)",
nil,
nil,
),
FailoverBndackReceivedTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndack_received_total"),
"Number of DHCP failover Binding Ack messages received (FailoverBndackReceivedTotal)",
nil,
nil,
),
FailoverBndupdPendingOutboundQueue: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_pending_in_outbound_queue"),
"Number of pending outbound DHCP failover Binding Update messages (FailoverBndupdPendingOutboundQueue)",
nil,
nil,
),
FailoverTransitionsCommunicationinterruptedState: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_transitions_communicationinterrupted_state_total"),
"Total number of transitions into COMMUNICATION INTERRUPTED state (FailoverTransitionsCommunicationinterruptedState)",
nil,
nil,
),
FailoverTransitionsPartnerdownState: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_transitions_partnerdown_state_total"),
"Total number of transitions into PARTNER DOWN state (FailoverTransitionsPartnerdownState)",
nil,
nil,
),
FailoverTransitionsRecoverState: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_transitions_recover_total"),
"Total number of transitions into RECOVER state (FailoverTransitionsRecoverState)",
nil,
nil,
),
FailoverBndupdDropped: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_dropped_total"),
"Total number of DHCP faileover Binding Updates dropped (FailoverBndupdDropped)",
nil,
nil,
),
}, nil
}
// dhcpPerf represents perflib metrics from the DHCP Server class.
// While the names of a number of perflib metrics suggest a rate is being returned (e.g. Packets Received/sec),
// perflib instead returns a counter, hence the "Total" suffix in some of the variable names.
type dhcpPerf struct {
PacketsReceivedTotal float64 `perflib:"Packets Received/sec"`
DuplicatesDroppedTotal float64 `perflib:"Duplicates Dropped/sec"`
PacketsExpiredTotal float64 `perflib:"Packets Expired/sec"`
ActiveQueueLength float64 `perflib:"Active Queue Length"`
ConflictCheckQueueLength float64 `perflib:"Conflict Check Queue Length"`
DiscoversTotal float64 `perflib:"Discovers/sec"`
OffersTotal float64 `perflib:"Offers/sec"`
RequestsTotal float64 `perflib:"Requests/sec"`
InformsTotal float64 `perflib:"Informs/sec"`
AcksTotal float64 `perflib:"Acks/sec"`
NacksTotal float64 `perflib:"Nacks/sec"`
DeclinesTotal float64 `perflib:"Declines/sec"`
ReleasesTotal float64 `perflib:"Releases/sec"`
DeniedDueToMatch float64 `perflib:"Denied due to match."`
DeniedDueToNonMatch float64 `perflib:"Denied due to nonmatch."`
OfferQueueLength float64 `perflib:"Offer Queue Length"`
FailoverBndupdSentTotal float64 `perflib:"Failover: BndUpd sent/sec."`
FailoverBndupdReceivedTotal float64 `perflib:"Failover: BndUpd received/sec."`
FailoverBndackSentTotal float64 `perflib:"Failover: BndAck sent/sec."`
FailoverBndackReceivedTotal float64 `perflib:"Failover: BndAck received/sec."`
FailoverBndupdPendingOutboundQueue float64 `perflib:"Failover: BndUpd pending in outbound queue."`
FailoverTransitionsCommunicationinterruptedState float64 `perflib:"Failover: Transitions to COMMUNICATION-INTERRUPTED state."`
FailoverTransitionsPartnerdownState float64 `perflib:"Failover: Transitions to PARTNER-DOWN state."`
FailoverTransitionsRecoverState float64 `perflib:"Failover: Transitions to RECOVER state."`
FailoverBndupdDropped float64 `perflib:"Failover: BndUpd Dropped."`
}
func (c *DhcpCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var perflib []dhcpPerf
if err := unmarshalObject(ctx.perfObjects["DHCP Server"], &perflib); err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.PacketsReceivedTotal,
prometheus.CounterValue,
perflib[0].PacketsReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DuplicatesDroppedTotal,
prometheus.CounterValue,
perflib[0].DuplicatesDroppedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsExpiredTotal,
prometheus.CounterValue,
perflib[0].PacketsExpiredTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ActiveQueueLength,
prometheus.GaugeValue,
perflib[0].ActiveQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.ConflictCheckQueueLength,
prometheus.GaugeValue,
perflib[0].ConflictCheckQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.DiscoversTotal,
prometheus.CounterValue,
perflib[0].DiscoversTotal,
)
ch <- prometheus.MustNewConstMetric(
c.OffersTotal,
prometheus.CounterValue,
perflib[0].OffersTotal,
)
ch <- prometheus.MustNewConstMetric(
c.RequestsTotal,
prometheus.CounterValue,
perflib[0].RequestsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.InformsTotal,
prometheus.CounterValue,
perflib[0].InformsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.AcksTotal,
prometheus.CounterValue,
perflib[0].AcksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.NacksTotal,
prometheus.CounterValue,
perflib[0].NacksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.DeclinesTotal,
prometheus.CounterValue,
perflib[0].DeclinesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.ReleasesTotal,
prometheus.CounterValue,
perflib[0].ReleasesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.OfferQueueLength,
prometheus.GaugeValue,
perflib[0].OfferQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.DeniedDueToMatch,
prometheus.CounterValue,
perflib[0].DeniedDueToMatch,
)
ch <- prometheus.MustNewConstMetric(
c.DeniedDueToNonMatch,
prometheus.CounterValue,
perflib[0].DeniedDueToNonMatch,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdSentTotal,
prometheus.CounterValue,
perflib[0].FailoverBndupdSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdReceivedTotal,
prometheus.CounterValue,
perflib[0].FailoverBndupdReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndackSentTotal,
prometheus.CounterValue,
perflib[0].FailoverBndackSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndackReceivedTotal,
prometheus.CounterValue,
perflib[0].FailoverBndackReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdPendingOutboundQueue,
prometheus.GaugeValue,
perflib[0].FailoverBndupdPendingOutboundQueue,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverTransitionsCommunicationinterruptedState,
prometheus.CounterValue,
perflib[0].FailoverTransitionsCommunicationinterruptedState,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverTransitionsPartnerdownState,
prometheus.CounterValue,
perflib[0].FailoverTransitionsPartnerdownState,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverTransitionsRecoverState,
prometheus.CounterValue,
perflib[0].FailoverTransitionsRecoverState,
)
ch <- prometheus.MustNewConstMetric(
c.FailoverBndupdDropped,
prometheus.CounterValue,
perflib[0].FailoverBndupdDropped,
)
return nil
}

collector/exchange.go (new file, 609 lines)

@@ -0,0 +1,609 @@
// +build windows
package collector
import (
"fmt"
"os"
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
func init() {
registerCollector("exchange", newExchangeCollector,
"MSExchange ADAccess Processes",
"MSExchangeTransport Queues",
"MSExchange HttpProxy",
"MSExchange ActiveSync",
"MSExchange Availability Service",
"MSExchange OWA",
"MSExchangeAutodiscover",
"MSExchange WorkloadManagement Workloads",
"MSExchange RpcClientAccess",
)
}
type exchangeCollector struct {
LDAPReadTime *prometheus.Desc
LDAPSearchTime *prometheus.Desc
LDAPWriteTime *prometheus.Desc
LDAPTimeoutErrorsPerSec *prometheus.Desc
LongRunningLDAPOperationsPerMin *prometheus.Desc
ExternalActiveRemoteDeliveryQueueLength *prometheus.Desc
InternalActiveRemoteDeliveryQueueLength *prometheus.Desc
ActiveMailboxDeliveryQueueLength *prometheus.Desc
RetryMailboxDeliveryQueueLength *prometheus.Desc
UnreachableQueueLength *prometheus.Desc
ExternalLargestDeliveryQueueLength *prometheus.Desc
InternalLargestDeliveryQueueLength *prometheus.Desc
PoisonQueueLength *prometheus.Desc
MailboxServerLocatorAverageLatency *prometheus.Desc
AverageAuthenticationLatency *prometheus.Desc
AverageCASProcessingLatency *prometheus.Desc
MailboxServerProxyFailureRate *prometheus.Desc
OutstandingProxyRequests *prometheus.Desc
ProxyRequestsPerSec *prometheus.Desc
ActiveSyncRequestsPerSec *prometheus.Desc
PingCommandsPending *prometheus.Desc
SyncCommandsPerSec *prometheus.Desc
AvailabilityRequestsSec *prometheus.Desc
CurrentUniqueUsers *prometheus.Desc
OWARequestsPerSec *prometheus.Desc
AutodiscoverRequestsPerSec *prometheus.Desc
ActiveTasks *prometheus.Desc
CompletedTasks *prometheus.Desc
QueuedTasks *prometheus.Desc
YieldedTasks *prometheus.Desc
IsActive *prometheus.Desc
RPCAveragedLatency *prometheus.Desc
RPCRequests *prometheus.Desc
ActiveUserCount *prometheus.Desc
ConnectionCount *prometheus.Desc
RPCOperationsPerSec *prometheus.Desc
UserCount *prometheus.Desc
ActiveCollFuncs []func(ctx *ScrapeContext, ch chan<- prometheus.Metric) error
}
var (
// All available collector names
exchangeAllCollectorNames = []string{
"ADAccessProcesses",
"TransportQueues",
"HttpProxy",
"ActiveSync",
"AvailabilityService",
"OutlookWebAccess",
"Autodiscover",
"WorkloadManagement",
"RpcClientAccess",
}
argExchangeListAllCollectors = kingpin.Flag(
"collectors.exchange.list",
"List the collectors along with their perflib object name/ids",
).Bool()
)
// newExchangeCollector returns a new Collector
func newExchangeCollector() (Collector, error) {
// desc creates a new prometheus description
desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
return prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "exchange", metricName),
description,
labels,
nil,
)
}
c := exchangeCollector{
RPCAveragedLatency: desc("rpc_avg_latency_sec", "The latency (sec), averaged for the past 1024 packets"),
RPCRequests: desc("rpc_requests", "Number of client requests currently being processed by the RPC Client Access service"),
ActiveUserCount: desc("rpc_active_user_count", "Number of unique users that have shown some kind of activity in the last 2 minutes"),
ConnectionCount: desc("rpc_connection_count", "Total number of client connections maintained"),
RPCOperationsPerSec: desc("rpc_operations_total", "The rate at which RPC operations occur"),
UserCount: desc("rpc_user_count", "Number of users"),
LDAPReadTime: desc("ldap_read_time_sec", "Time (sec) to send an LDAP read request and receive a response", "name"),
LDAPSearchTime: desc("ldap_search_time_sec", "Time (sec) to send an LDAP search request and receive a response", "name"),
LDAPWriteTime: desc("ldap_write_time_sec", "Time (sec) to send an LDAP Add/Modify/Delete request and receive a response", "name"),
LDAPTimeoutErrorsPerSec: desc("ldap_timeout_errors_total", "Total number of LDAP timeout errors", "name"),
LongRunningLDAPOperationsPerMin: desc("ldap_long_running_ops_per_sec", "Long Running LDAP operations per second", "name"),
ExternalActiveRemoteDeliveryQueueLength: desc("transport_queues_external_active_remote_delivery", "External Active Remote Delivery Queue length", "name"),
InternalActiveRemoteDeliveryQueueLength: desc("transport_queues_internal_active_remote_delivery", "Internal Active Remote Delivery Queue length", "name"),
ActiveMailboxDeliveryQueueLength: desc("transport_queues_active_mailbox_delivery", "Active Mailbox Delivery Queue length", "name"),
RetryMailboxDeliveryQueueLength: desc("transport_queues_retry_mailbox_delivery", "Retry Mailbox Delivery Queue length", "name"),
UnreachableQueueLength: desc("transport_queues_unreachable", "Unreachable Queue length", "name"),
ExternalLargestDeliveryQueueLength: desc("transport_queues_external_largest_delivery", "External Largest Delivery Queue length", "name"),
InternalLargestDeliveryQueueLength: desc("transport_queues_internal_largest_delivery", "Internal Largest Delivery Queue length", "name"),
PoisonQueueLength: desc("transport_queues_poison", "Poison Queue length", "name"),
MailboxServerLocatorAverageLatency: desc("http_proxy_mailbox_server_locator_avg_latency_sec", "Average latency (sec) of MailboxServerLocator web service calls", "name"),
AverageAuthenticationLatency: desc("http_proxy_avg_auth_latency", "Average time spent authenticating CAS requests over the last 200 samples", "name"),
OutstandingProxyRequests: desc("http_proxy_outstanding_proxy_requests", "Number of concurrent outstanding proxy requests", "name"),
ProxyRequestsPerSec: desc("http_proxy_requests_total", "Number of proxy requests processed each second", "name"),
AvailabilityRequestsSec: desc("avail_service_requests_per_sec", "Number of requests serviced per second"),
CurrentUniqueUsers: desc("owa_current_unique_users", "Number of unique users currently logged on to Outlook Web App"),
OWARequestsPerSec: desc("owa_requests_total", "Number of requests handled by Outlook Web App per second"),
AutodiscoverRequestsPerSec: desc("autodiscover_requests_total", "Number of autodiscover service requests processed each second"),
ActiveTasks: desc("workload_active_tasks", "Number of active tasks currently running in the background for workload management", "name"),
CompletedTasks: desc("workload_completed_tasks", "Number of workload management tasks that have been completed", "name"),
QueuedTasks: desc("workload_queued_tasks", "Number of workload management tasks that are currently queued up waiting to be processed", "name"),
YieldedTasks: desc("workload_yielded_tasks", "The total number of tasks that have been yielded by a workload", "name"),
IsActive: desc("workload_is_active", "Active indicates whether the workload is in an active (1) or paused (0) state", "name"),
ActiveSyncRequestsPerSec: desc("activesync_requests_total", "Num HTTP requests received from the client via ASP.NET per sec. Shows Current user load"),
AverageCASProcessingLatency: desc("http_proxy_avg_cas_proccessing_latency_sec", "Average latency (sec) of CAS processing time over the last 200 reqs", "name"),
MailboxServerProxyFailureRate: desc("http_proxy_mailbox_proxy_failure_rate", "% of failures between this CAS and MBX servers over the last 200 samples", "name"),
PingCommandsPending: desc("activesync_ping_cmds_pending", "Number of ping commands currently pending in the queue"),
SyncCommandsPerSec: desc("activesync_sync_cmds_total", "Number of sync commands processed per second. Clients use this command to synchronize items within a folder"),
}
collectorDesc := map[string]string{
"ADAccessProcesses": "[19108] MSExchange ADAccess Processes",
"TransportQueues": "[20524] MSExchangeTransport Queues",
"HttpProxy": "[36934] MSExchange HttpProxy",
"ActiveSync": "[25138] MSExchange ActiveSync",
"AvailabilityService": "[24914] MSExchange Availability Service",
"OutlookWebAccess": "[24618] MSExchange OWA",
"Autodiscover": "[29240] MSExchange Autodiscover",
"WorkloadManagement": "[19430] MSExchange WorkloadManagement Workloads",
"RpcClientAccess": "[29336] MSExchange RpcClientAccess",
}
if *argExchangeListAllCollectors {
fmt.Printf("%-32s %-32s\n", "Collector Name", "[PerfID] Perflib Object")
for _, cname := range exchangeAllCollectorNames {
fmt.Printf("%-32s %-32s\n", cname, collectorDesc[cname])
}
os.Exit(0)
}
return &c, nil
}
// Collect collects exchange metrics and sends them to prometheus
func (c *exchangeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
for collectorName, collectorFunc := range map[string]func(ctx *ScrapeContext, ch chan<- prometheus.Metric) error{
"ADAccessProcesses": c.collectADAccessProcesses,
"TransportQueues": c.collectTransportQueues,
"HttpProxy": c.collectHTTPProxy,
"ActiveSync": c.collectActiveSync,
"AvailabilityService": c.collectAvailabilityService,
"OutlookWebAccess": c.collectOWA,
"Autodiscover": c.collectAutoDiscover,
"WorkloadManagement": c.collectWorkloadManagementWorkloads,
"RpcClientAccess": c.collectRPC,
} {
if err := collectorFunc(ctx, ch); err != nil {
log.Errorf("Error in %s: %s", collectorName, err)
return err
}
}
return nil
}
// Perflib: [19108] MSExchange ADAccess Processes
type perflibADAccessProcesses struct {
Name string
LDAPReadTime float64 `perflib:"LDAP Read Time"`
LDAPSearchTime float64 `perflib:"LDAP Search Time"`
LDAPWriteTime float64 `perflib:"LDAP Write Time"`
LDAPTimeoutErrorsPerSec float64 `perflib:"LDAP Timeout Errors/sec"`
LongRunningLDAPOperationsPerMin float64 `perflib:"Long Running LDAP Operations/min"`
}
func (c *exchangeCollector) collectADAccessProcesses(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibADAccessProcesses
if err := unmarshalObject(ctx.perfObjects["MSExchange ADAccess Processes"], &data); err != nil {
return err
}
labelUseCount := make(map[string]int)
for _, proc := range data {
labelName := c.toLabelName(proc.Name)
if strings.HasSuffix(labelName, "_total") {
continue
}
// since we're not including the PID suffix from the instance names in the label names,
// we get an occasional duplicate. This seems to affect only about 4 instances on this object.
labelUseCount[labelName]++
if labelUseCount[labelName] > 1 {
labelName = fmt.Sprintf("%s_%d", labelName, labelUseCount[labelName])
}
ch <- prometheus.MustNewConstMetric(
c.LDAPReadTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPReadTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LDAPSearchTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPSearchTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LDAPWriteTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPWriteTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LDAPTimeoutErrorsPerSec,
prometheus.CounterValue,
proc.LDAPTimeoutErrorsPerSec,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.LongRunningLDAPOperationsPerMin,
prometheus.CounterValue,
proc.LongRunningLDAPOperationsPerMin*60,
labelName,
)
}
return nil
}
// Perflib: [24914] MSExchange Availability Service
type perflibAvailabilityService struct {
RequestsSec float64 `perflib:"Availability Requests (sec)"`
}
func (c *exchangeCollector) collectAvailabilityService(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibAvailabilityService
if err := unmarshalObject(ctx.perfObjects["MSExchange Availability Service"], &data); err != nil {
return err
}
for _, availservice := range data {
ch <- prometheus.MustNewConstMetric(
c.AvailabilityRequestsSec,
prometheus.CounterValue,
availservice.RequestsSec,
)
}
return nil
}
// Perflib: [36934] MSExchange HttpProxy
type perflibHTTPProxy struct {
Name string
MailboxServerLocatorAverageLatency float64 `perflib:"MailboxServerLocator Average Latency (Moving Average)"`
AverageAuthenticationLatency float64 `perflib:"Average Authentication Latency"`
AverageCASProcessingLatency float64 `perflib:"Average ClientAccess Server Processing Latency"`
MailboxServerProxyFailureRate float64 `perflib:"Mailbox Server Proxy Failure Rate"`
OutstandingProxyRequests float64 `perflib:"Outstanding Proxy Requests"`
ProxyRequestsPerSec float64 `perflib:"Proxy Requests/Sec"`
}
func (c *exchangeCollector) collectHTTPProxy(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibHTTPProxy
if err := unmarshalObject(ctx.perfObjects["MSExchange HttpProxy"], &data); err != nil {
return err
}
for _, instance := range data {
labelName := c.toLabelName(instance.Name)
ch <- prometheus.MustNewConstMetric(
c.MailboxServerLocatorAverageLatency,
prometheus.GaugeValue,
c.msToSec(instance.MailboxServerLocatorAverageLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.AverageAuthenticationLatency,
prometheus.GaugeValue,
instance.AverageAuthenticationLatency,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.AverageCASProcessingLatency,
prometheus.GaugeValue,
c.msToSec(instance.AverageCASProcessingLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.MailboxServerProxyFailureRate,
prometheus.GaugeValue,
instance.MailboxServerProxyFailureRate,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.OutstandingProxyRequests,
prometheus.GaugeValue,
instance.OutstandingProxyRequests,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ProxyRequestsPerSec,
prometheus.CounterValue,
instance.ProxyRequestsPerSec,
labelName,
)
}
return nil
}
// Perflib: [24618] MSExchange OWA
type perflibOWA struct {
CurrentUniqueUsers float64 `perflib:"Current Unique Users"`
RequestsPerSec float64 `perflib:"Requests/sec"`
}
func (c *exchangeCollector) collectOWA(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibOWA
if err := unmarshalObject(ctx.perfObjects["MSExchange OWA"], &data); err != nil {
return err
}
for _, owa := range data {
ch <- prometheus.MustNewConstMetric(
c.CurrentUniqueUsers,
prometheus.GaugeValue,
owa.CurrentUniqueUsers,
)
ch <- prometheus.MustNewConstMetric(
c.OWARequestsPerSec,
prometheus.CounterValue,
owa.RequestsPerSec,
)
}
return nil
}
// Perflib: [25138] MSExchange ActiveSync
type perflibActiveSync struct {
RequestsPerSec float64 `perflib:"Requests/sec"`
PingCommandsPending float64 `perflib:"Ping Commands Pending"`
SyncCommandsPerSec float64 `perflib:"Sync Commands/sec"`
}
func (c *exchangeCollector) collectActiveSync(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibActiveSync
if err := unmarshalObject(ctx.perfObjects["MSExchange ActiveSync"], &data); err != nil {
return err
}
for _, instance := range data {
ch <- prometheus.MustNewConstMetric(
c.ActiveSyncRequestsPerSec,
prometheus.CounterValue,
instance.RequestsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.PingCommandsPending,
prometheus.GaugeValue,
instance.PingCommandsPending,
)
ch <- prometheus.MustNewConstMetric(
c.SyncCommandsPerSec,
prometheus.CounterValue,
instance.SyncCommandsPerSec,
)
}
return nil
}
// Perflib: [29366] MSExchange RpcClientAccess
type perflibRPCClientAccess struct {
RPCAveragedLatency float64 `perflib:"RPC Averaged Latency"`
RPCRequests float64 `perflib:"RPC Requests"`
ActiveUserCount float64 `perflib:"Active User Count"`
ConnectionCount float64 `perflib:"Connection Count"`
RPCOperationsPerSec float64 `perflib:"RPC Operations/sec"`
UserCount float64 `perflib:"User Count"`
}
func (c *exchangeCollector) collectRPC(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibRPCClientAccess
if err := unmarshalObject(ctx.perfObjects["MSExchange RpcClientAccess"], &data); err != nil {
return err
}
for _, rpc := range data {
ch <- prometheus.MustNewConstMetric(
c.RPCAveragedLatency,
prometheus.GaugeValue,
c.msToSec(rpc.RPCAveragedLatency),
)
ch <- prometheus.MustNewConstMetric(
c.RPCRequests,
prometheus.GaugeValue,
rpc.RPCRequests,
)
ch <- prometheus.MustNewConstMetric(
c.ActiveUserCount,
prometheus.GaugeValue,
rpc.ActiveUserCount,
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionCount,
prometheus.GaugeValue,
rpc.ConnectionCount,
)
ch <- prometheus.MustNewConstMetric(
c.RPCOperationsPerSec,
prometheus.CounterValue,
rpc.RPCOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.UserCount,
prometheus.GaugeValue,
rpc.UserCount,
)
}
return nil
}
// Perflib: [20524] MSExchangeTransport Queues
type perflibTransportQueues struct {
Name string
ExternalActiveRemoteDeliveryQueueLength float64 `perflib:"External Active Remote Delivery Queue Length"`
InternalActiveRemoteDeliveryQueueLength float64 `perflib:"Internal Active Remote Delivery Queue Length"`
ActiveMailboxDeliveryQueueLength float64 `perflib:"Active Mailbox Delivery Queue Length"`
RetryMailboxDeliveryQueueLength float64 `perflib:"Retry Mailbox Delivery Queue Length"`
UnreachableQueueLength float64 `perflib:"Unreachable Queue Length"`
ExternalLargestDeliveryQueueLength float64 `perflib:"External Largest Delivery Queue Length"`
InternalLargestDeliveryQueueLength float64 `perflib:"Internal Largest Delivery Queue Length"`
PoisonQueueLength float64 `perflib:"Poison Queue Length"`
}
func (c *exchangeCollector) collectTransportQueues(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibTransportQueues
if err := unmarshalObject(ctx.perfObjects["MSExchangeTransport Queues"], &data); err != nil {
return err
}
for _, queue := range data {
labelName := c.toLabelName(queue.Name)
if strings.HasSuffix(labelName, "_total") {
continue
}
ch <- prometheus.MustNewConstMetric(
c.ExternalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
queue.ExternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.InternalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
queue.InternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ActiveMailboxDeliveryQueueLength,
prometheus.GaugeValue,
queue.ActiveMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.RetryMailboxDeliveryQueueLength,
prometheus.GaugeValue,
queue.RetryMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.UnreachableQueueLength,
prometheus.GaugeValue,
queue.UnreachableQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ExternalLargestDeliveryQueueLength,
prometheus.GaugeValue,
queue.ExternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.InternalLargestDeliveryQueueLength,
prometheus.GaugeValue,
queue.InternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.PoisonQueueLength,
prometheus.GaugeValue,
queue.PoisonQueueLength,
labelName,
)
}
return nil
}
// Perflib: [19430] MSExchange WorkloadManagement Workloads
type perflibWorkloadManagementWorkloads struct {
Name string
ActiveTasks float64 `perflib:"ActiveTasks"`
CompletedTasks float64 `perflib:"CompletedTasks"`
QueuedTasks float64 `perflib:"QueuedTasks"`
YieldedTasks float64 `perflib:"YieldedTasks"`
IsActive float64 `perflib:"Active"`
}
func (c *exchangeCollector) collectWorkloadManagementWorkloads(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibWorkloadManagementWorkloads
if err := unmarshalObject(ctx.perfObjects["MSExchange WorkloadManagement Workloads"], &data); err != nil {
return err
}
for _, instance := range data {
labelName := c.toLabelName(instance.Name)
if strings.HasSuffix(labelName, "_total") {
continue
}
ch <- prometheus.MustNewConstMetric(
c.ActiveTasks,
prometheus.GaugeValue,
instance.ActiveTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.CompletedTasks,
prometheus.CounterValue,
instance.CompletedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.QueuedTasks,
prometheus.CounterValue,
instance.QueuedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.YieldedTasks,
prometheus.CounterValue,
instance.YieldedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.IsActive,
prometheus.GaugeValue,
instance.IsActive,
labelName,
)
}
return nil
}
// [29240] MSExchangeAutodiscover
type perflibAutodiscover struct {
RequestsPerSec float64 `perflib:"Requests/sec"`
}
func (c *exchangeCollector) collectAutoDiscover(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var data []perflibAutodiscover
if err := unmarshalObject(ctx.perfObjects["MSExchangeAutodiscover"], &data); err != nil {
return err
}
for _, autodisc := range data {
ch <- prometheus.MustNewConstMetric(
c.AutodiscoverRequestsPerSec,
prometheus.CounterValue,
autodisc.RequestsPerSec,
)
}
return nil
}
// toLabelName converts strings to lowercase and replaces all whitespace and dots with underscores
func (c *exchangeCollector) toLabelName(name string) string {
s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")
s = strings.ReplaceAll(s, "__", "_")
return s
}
// msToSec converts from ms to seconds
func (c *exchangeCollector) msToSec(t float64) float64 {
return t / 1000
}
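As a worked illustration of the two helpers directly above, this standalone re-implementation (the sample instance names are made up) shows the label values and unit conversion they produce; note that instances normalizing to a `_total` suffix are skipped by the collect functions:

```go
package main

import (
	"fmt"
	"strings"
)

// toLabelName mirrors the helper above: lowercase, whitespace and dots to underscores.
func toLabelName(name string) string {
	s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")
	return strings.ReplaceAll(s, "__", "_")
}

// msToSec mirrors the helper above: milliseconds to seconds.
func msToSec(t float64) float64 { return t / 1000 }

func main() {
	// Hypothetical perflib instance names.
	for _, name := range []string{"MSExchangeOWAAppPool", "Contoso.com Mailbox Delivery", "_Total"} {
		fmt.Printf("%-30q -> %q\n", name, toLabelName(name))
	}
	fmt.Println(msToSec(1500), "seconds") // 1.5 seconds
}
```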

collector/remote_fx.go (new file, 351 lines)

@@ -0,0 +1,351 @@
// +build windows
package collector
import (
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
func init() {
registerCollector("remote_fx", NewRemoteFx, "RemoteFX Network", "RemoteFX Graphics")
}
// A RemoteFxCollector is a Prometheus collector for
// WMI Win32_PerfRawData_Counters_RemoteFXNetwork & Win32_PerfRawData_Counters_RemoteFXGraphics metrics
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxnetwork/
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxgraphics/
type RemoteFxCollector struct {
// net
BaseTCPRTT *prometheus.Desc
BaseUDPRTT *prometheus.Desc
CurrentTCPBandwidth *prometheus.Desc
CurrentTCPRTT *prometheus.Desc
CurrentUDPBandwidth *prometheus.Desc
CurrentUDPRTT *prometheus.Desc
TotalReceivedBytes *prometheus.Desc
TotalSentBytes *prometheus.Desc
UDPPacketsReceivedPersec *prometheus.Desc
UDPPacketsSentPersec *prometheus.Desc
//gfx
AverageEncodingTime *prometheus.Desc
FrameQuality *prometheus.Desc
FramesSkippedPerSecondInsufficientResources *prometheus.Desc
GraphicsCompressionratio *prometheus.Desc
InputFramesPerSecond *prometheus.Desc
OutputFramesPerSecond *prometheus.Desc
SourceFramesPerSecond *prometheus.Desc
}
// NewRemoteFx constructs a new RemoteFxCollector
func NewRemoteFx() (Collector, error) {
const subsystem = "remote_fx"
return &RemoteFxCollector{
// net
BaseTCPRTT: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_base_tcp_rtt_seconds"),
"Base TCP round-trip time (RTT) detected in seconds",
[]string{"session_name"},
nil,
),
BaseUDPRTT: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_base_udp_rtt_seconds"),
"Base UDP round-trip time (RTT) detected in seconds.",
[]string{"session_name"},
nil,
),
CurrentTCPBandwidth: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_current_tcp_bandwidth"),
"TCP Bandwidth detected in bytes per seccond.",
[]string{"session_name"},
nil,
),
CurrentTCPRTT: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_current_tcp_rtt_seconds"),
"Average TCP round-trip time (RTT) detected in seconds.",
[]string{"session_name"},
nil,
),
CurrentUDPBandwidth: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_current_udp_bandwidth"),
"UDP Bandwidth detected in bytes per second.",
[]string{"session_name"},
nil,
),
CurrentUDPRTT: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_current_udp_rtt_seconds"),
"Average UDP round-trip time (RTT) detected in seconds.",
[]string{"session_name"},
nil,
),
TotalReceivedBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_received_bytes_total"),
"(TotalReceivedBytes)",
[]string{"session_name"},
nil,
),
TotalSentBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_sent_bytes_total"),
"(TotalSentBytes)",
[]string{"session_name"},
nil,
),
UDPPacketsReceivedPersec: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_udp_packets_received_total"),
"Rate in packets per second at which packets are received over UDP.",
[]string{"session_name"},
nil,
),
UDPPacketsSentPersec: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "net_udp_packets_sent_total"),
"Rate in packets per second at which packets are sent over UDP.",
[]string{"session_name"},
nil,
),
//gfx
AverageEncodingTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_average_encoding_time_seconds"),
"Average frame encoding time in seconds",
[]string{"session_name"},
nil,
),
FrameQuality: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_frame_quality"),
"Quality of the output frame expressed as a percentage of the quality of the source frame.",
[]string{"session_name"},
nil,
),
FramesSkippedPerSecondInsufficientResources: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_frames_skipped_insufficient_resource_total"),
"Number of frames skipped per second due to insufficient client resources.",
[]string{"session_name", "resource"},
nil,
),
GraphicsCompressionratio: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_graphics_compression_ratio"),
"Ratio of the number of bytes encoded to the number of bytes input.",
[]string{"session_name"},
nil,
),
InputFramesPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_input_frames_total"),
"Number of sources frames provided as input to RemoteFX graphics per second.",
[]string{"session_name"},
nil,
),
OutputFramesPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_output_frames_total"),
"Number of frames sent to the client per second.",
[]string{"session_name"},
nil,
),
SourceFramesPerSecond: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "gfx_source_frames_total"),
"Number of frames composed by the source (DWM) per second.",
[]string{"session_name"},
nil,
),
}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *RemoteFxCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectRemoteFXNetworkCount(ctx, ch); err != nil {
log.Error("failed collecting terminal services session count metrics:", desc, err)
return err
}
if desc, err := c.collectRemoteFXGraphicsCounters(ctx, ch); err != nil {
log.Error("failed collecting terminal services session count metrics:", desc, err)
return err
}
return nil
}
type perflibRemoteFxNetwork struct {
Name string
BaseTCPRTT float64 `perflib:"Base TCP RTT"`
BaseUDPRTT float64 `perflib:"Base UDP RTT"`
CurrentTCPBandwidth float64 `perflib:"Current TCP Bandwidth"`
CurrentTCPRTT float64 `perflib:"Current TCP RTT"`
CurrentUDPBandwidth float64 `perflib:"Current UDP Bandwidth"`
CurrentUDPRTT float64 `perflib:"Current UDP RTT"`
TotalReceivedBytes float64 `perflib:"Total Received Bytes"`
TotalSentBytes float64 `perflib:"Total Sent Bytes"`
UDPPacketsReceivedPersec float64 `perflib:"UDP Packets Received/sec"`
UDPPacketsSentPersec float64 `perflib:"UDP Packets Sent/sec"`
}
func (c *RemoteFxCollector) collectRemoteFXNetworkCount(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibRemoteFxNetwork, 0)
err := unmarshalObject(ctx.perfObjects["RemoteFX Network"], &dst)
if err != nil {
return nil, err
}
for _, d := range dst {
// only collect metrics for remote named sessions
n := strings.ToLower(d.Name)
if n == "" || n == "services" || n == "console" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.BaseTCPRTT,
prometheus.GaugeValue,
milliSecToSec(d.BaseTCPRTT),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.BaseUDPRTT,
prometheus.GaugeValue,
milliSecToSec(d.BaseUDPRTT),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentTCPBandwidth,
prometheus.GaugeValue,
(d.CurrentTCPBandwidth*1000)/8,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentTCPRTT,
prometheus.GaugeValue,
milliSecToSec(d.CurrentTCPRTT),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentUDPBandwidth,
prometheus.GaugeValue,
(d.CurrentUDPBandwidth*1000)/8,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.CurrentUDPRTT,
prometheus.GaugeValue,
milliSecToSec(d.CurrentUDPRTT),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalReceivedBytes,
prometheus.CounterValue,
d.TotalReceivedBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.TotalSentBytes,
prometheus.CounterValue,
d.TotalSentBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.UDPPacketsReceivedPersec,
prometheus.CounterValue,
d.UDPPacketsReceivedPersec,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.UDPPacketsSentPersec,
prometheus.CounterValue,
d.UDPPacketsSentPersec,
d.Name,
)
}
return nil, nil
}
type perflibRemoteFxGraphics struct {
Name string
AverageEncodingTime float64 `perflib:"Average Encoding Time"`
FrameQuality float64 `perflib:"Frame Quality"`
FramesSkippedPerSecondInsufficientClientResources float64 `perflib:"Frames Skipped/Second - Insufficient Client Resources"`
FramesSkippedPerSecondInsufficientNetworkResources float64 `perflib:"Frames Skipped/Second - Insufficient Network Resources"`
FramesSkippedPerSecondInsufficientServerResources float64 `perflib:"Frames Skipped/Second - Insufficient Server Resources"`
GraphicsCompressionratio float64 `perflib:"Graphics Compression ratio"`
InputFramesPerSecond float64 `perflib:"Input Frames/Second"`
OutputFramesPerSecond float64 `perflib:"Output Frames/Second"`
SourceFramesPerSecond float64 `perflib:"Source Frames/Second"`
}
func (c *RemoteFxCollector) collectRemoteFXGraphicsCounters(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibRemoteFxGraphics, 0)
err := unmarshalObject(ctx.perfObjects["RemoteFX Graphics"], &dst)
if err != nil {
return nil, err
}
for _, d := range dst {
// only collect metrics for remote named sessions
n := strings.ToLower(d.Name)
if n == "" || n == "services" || n == "console" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.AverageEncodingTime,
prometheus.GaugeValue,
milliSecToSec(d.AverageEncodingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FrameQuality,
prometheus.GaugeValue,
d.FrameQuality,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientClientResources,
d.Name,
"client",
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientNetworkResources,
d.Name,
"network",
)
ch <- prometheus.MustNewConstMetric(
c.FramesSkippedPerSecondInsufficientResources,
prometheus.CounterValue,
d.FramesSkippedPerSecondInsufficientServerResources,
d.Name,
"server",
)
ch <- prometheus.MustNewConstMetric(
c.GraphicsCompressionratio,
prometheus.GaugeValue,
d.GraphicsCompressionratio,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.InputFramesPerSecond,
prometheus.CounterValue,
d.InputFramesPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.OutputFramesPerSecond,
prometheus.CounterValue,
d.OutputFramesPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.SourceFramesPerSecond,
prometheus.CounterValue,
d.SourceFramesPerSecond,
d.Name,
)
}
return nil, nil
}
func milliSecToSec(t float64) float64 {
return t / 1000
}

collector/service.go

@@ -3,6 +3,7 @@
package collector
import (
"strconv"
"strings"
"github.com/StackExchange/wmi"
@@ -24,9 +25,10 @@ var (
// A serviceCollector is a Prometheus collector for WMI Win32_Service metrics
type serviceCollector struct {
State *prometheus.Desc
StartMode *prometheus.Desc
Status *prometheus.Desc
Information *prometheus.Desc
State *prometheus.Desc
StartMode *prometheus.Desc
Status *prometheus.Desc
queryWhereClause string
}
@@ -40,6 +42,12 @@ func NewserviceCollector() (Collector, error) {
}
return &serviceCollector{
Information: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "info"),
"A metric with a constant '1' value labeled with service information",
[]string{"name", "display_name", "process_id"},
nil,
),
State: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "state"),
"The state of the service (State)",
@@ -75,10 +83,12 @@ func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
// Win32_Service docs:
// - https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx
type Win32_Service struct {
Name string
State string
Status string
StartMode string
DisplayName string
Name string
ProcessId uint32
State string
Status string
StartMode string
}
var (
@@ -123,6 +133,16 @@ func (c *serviceCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Des
}
for _, service := range dst {
pid := strconv.FormatUint(uint64(service.ProcessId), 10)
ch <- prometheus.MustNewConstMetric(
c.Information,
prometheus.GaugeValue,
1.0,
strings.ToLower(service.Name),
service.DisplayName,
pid,
)
for _, state := range allStates {
isCurrentState := 0.0
if state == strings.ToLower(service.State) {

collector/terminal_services.go (new file, 403 lines)

@@ -0,0 +1,403 @@
// +build windows
package collector
import (
"errors"
"strings"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
const ConnectionBrokerFeatureID uint32 = 133
func init() {
registerCollector("terminal_services", NewTerminalServicesCollector, "Terminal Services", "Terminal Services Session", "Remote Desktop Connection Broker Counterset")
}
var (
connectionBrokerEnabled = isConnectionBrokerServer()
)
type Win32_ServerFeature struct {
ID uint32
}
func isConnectionBrokerServer() bool {
var dst []Win32_ServerFeature
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return false
}
for _, d := range dst {
if d.ID == ConnectionBrokerFeatureID {
return true
}
}
log.Debug("host is not a connection broker skipping Connection Broker performance metrics.")
return false
}
// A TerminalServicesCollector is a Prometheus collector for WMI
// Win32_PerfRawData_LocalSessionManager_TerminalServices & Win32_PerfRawData_TermService_TerminalServicesSession metrics
// https://docs.microsoft.com/en-us/previous-versions/aa394344(v%3Dvs.85)
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_localsessionmanager_terminalservices/
type TerminalServicesCollector struct {
LocalSessionCount *prometheus.Desc
ConnectionBrokerPerformance *prometheus.Desc
HandleCount *prometheus.Desc
PageFaultsPersec *prometheus.Desc
PageFileBytes *prometheus.Desc
PageFileBytesPeak *prometheus.Desc
PercentPrivilegedTime *prometheus.Desc
PercentProcessorTime *prometheus.Desc
PercentUserTime *prometheus.Desc
PoolNonpagedBytes *prometheus.Desc
PoolPagedBytes *prometheus.Desc
PrivateBytes *prometheus.Desc
ThreadCount *prometheus.Desc
VirtualBytes *prometheus.Desc
VirtualBytesPeak *prometheus.Desc
WorkingSet *prometheus.Desc
WorkingSetPeak *prometheus.Desc
}
// NewTerminalServicesCollector constructs a new TerminalServicesCollector
func NewTerminalServicesCollector() (Collector, error) {
const subsystem = "terminal_services"
return &TerminalServicesCollector{
LocalSessionCount: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "local_session_count"),
"Number of Terminal Services sessions",
[]string{"session"},
nil,
),
ConnectionBrokerPerformance: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connection_broker_performance_total"),
"The total number of connections handled by the Connection Brokers since the service started.",
[]string{"connection"},
nil,
),
HandleCount: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "handle_count"),
"Total number of handles currently opened by this process. This number is the sum of the handles currently opened by each thread in this process.",
[]string{"session_name"},
nil,
),
PageFaultsPersec: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "page_fault_total"),
"Rate at which page faults occur in the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. The page may not be retrieved from disk if it is on the standby list and therefore already in main memory. The page also may not be retrieved if it is in use by another process which shares the page.",
[]string{"session_name"},
nil,
),
PageFileBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "page_file_bytes"),
"Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.",
[]string{"session_name"},
nil,
),
PageFileBytesPeak: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "page_file_bytes_peak"),
"Maximum number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.",
[]string{"session_name"},
nil,
),
PercentPrivilegedTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "privileged_time_seconds_total"),
"Total elapsed time that the threads of the process have spent executing code in privileged mode.",
[]string{"session_name"},
nil,
),
PercentProcessorTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "processor_time_seconds_total"),
"Total elapsed time that all of the threads of this process used the processor to execute instructions.",
[]string{"session_name"},
nil,
),
PercentUserTime: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "user_time_seconds_total"),
"Total elapsed time that this process's threads have spent executing code in user mode. Applications, environment subsystems, and integral subsystems execute in user mode.",
[]string{"session_name"},
nil,
),
PoolNonpagedBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pool_non_paged_bytes"),
"Number of bytes in the non-paged pool, an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. This property displays the last observed value only; it is not an average.",
[]string{"session_name"},
nil,
),
PoolPagedBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_bytes"),
"Number of bytes in the paged pool, an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. This property displays the last observed value only; it is not an average.",
[]string{"session_name"},
nil,
),
PrivateBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "private_bytes"),
"Current number of bytes this process has allocated that cannot be shared with other processes.",
[]string{"session_name"},
nil,
),
ThreadCount: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "thread_count"),
"Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.",
[]string{"session_name"},
nil,
),
VirtualBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "virtual_bytes"),
"Current size, in bytes, of the virtual address space the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries.",
[]string{"session_name"},
nil,
),
VirtualBytesPeak: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "virtual_bytes_peak"),
"Maximum number of bytes of virtual address space the process has used at any one time. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process might limit its ability to load libraries.",
[]string{"session_name"},
nil,
),
WorkingSet: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "working_set_bytes"),
"Current number of bytes in the working set of this process. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.",
[]string{"session_name"},
nil,
),
WorkingSetPeak: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "working_set_bytes_peak"),
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.",
[]string{"session_name"},
nil,
),
}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *TerminalServicesCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectTSSessionCount(ctx, ch); err != nil {
log.Error("failed collecting terminal services session count metrics:", desc, err)
return err
}
if desc, err := c.collectTSSessionCounters(ctx, ch); err != nil {
log.Error("failed collecting terminal services session count metrics:", desc, err)
return err
}
// only collect CollectionBrokerPerformance if host is a Connection Broker
if connectionBrokerEnabled {
if desc, err := c.collectCollectionBrokerPerformanceCounter(ctx, ch); err != nil {
log.Error("failed collecting Connection Broker performance metrics:", desc, err)
return err
}
}
return nil
}
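// perflibTerminalServices maps the session-count counters read from the "Terminal Services" perflib object.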
type perflibTerminalServices struct {
ActiveSessions float64 `perflib:"Active Sessions"`
InactiveSessions float64 `perflib:"Inactive Sessions"`
TotalSessions float64 `perflib:"Total Sessions"`
}
func (c *TerminalServicesCollector) collectTSSessionCount(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibTerminalServices, 0)
err := unmarshalObject(ctx.perfObjects["Terminal Services"], &dst)
if err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.LocalSessionCount,
prometheus.GaugeValue,
dst[0].ActiveSessions,
"active",
)
ch <- prometheus.MustNewConstMetric(
c.LocalSessionCount,
prometheus.GaugeValue,
dst[0].InactiveSessions,
"inactive",
)
ch <- prometheus.MustNewConstMetric(
c.LocalSessionCount,
prometheus.GaugeValue,
dst[0].TotalSessions,
"total",
)
return nil, nil
}
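// perflibTerminalServicesSession maps the per-session resource counters read from the "Terminal Services Session" perflib object.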
type perflibTerminalServicesSession struct {
Name string
HandleCount float64 `perflib:"Handle Count"`
PageFaultsPersec float64 `perflib:"Page Faults/sec"`
PageFileBytes float64 `perflib:"Page File Bytes"`
PageFileBytesPeak float64 `perflib:"Page File Bytes Peak"`
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
PercentProcessorTime float64 `perflib:"% Processor Time"`
PercentUserTime float64 `perflib:"% User Time"`
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
PrivateBytes float64 `perflib:"Private Bytes"`
ThreadCount float64 `perflib:"Thread Count"`
VirtualBytes float64 `perflib:"Virtual Bytes"`
VirtualBytesPeak float64 `perflib:"Virtual Bytes Peak"`
WorkingSet float64 `perflib:"Working Set"`
WorkingSetPeak float64 `perflib:"Working Set Peak"`
}
func (c *TerminalServicesCollector) collectTSSessionCounters(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibTerminalServicesSession, 0)
err := unmarshalObject(ctx.perfObjects["Terminal Services Session"], &dst)
if err != nil {
return nil, err
}
for _, d := range dst {
// only collect metrics for remote named sessions
n := strings.ToLower(d.Name)
if n == "" || n == "services" || n == "console" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.HandleCount,
prometheus.GaugeValue,
d.HandleCount,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PageFaultsPersec,
prometheus.CounterValue,
d.PageFaultsPersec,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PageFileBytes,
prometheus.GaugeValue,
d.PageFileBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PageFileBytesPeak,
prometheus.GaugeValue,
d.PageFileBytesPeak,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PercentPrivilegedTime,
prometheus.CounterValue,
d.PercentPrivilegedTime,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PercentProcessorTime,
prometheus.CounterValue,
d.PercentProcessorTime,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PercentUserTime,
prometheus.CounterValue,
d.PercentUserTime,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PoolNonpagedBytes,
prometheus.GaugeValue,
d.PoolNonpagedBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PoolPagedBytes,
prometheus.GaugeValue,
d.PoolPagedBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.PrivateBytes,
prometheus.GaugeValue,
d.PrivateBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.ThreadCount,
prometheus.GaugeValue,
d.ThreadCount,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VirtualBytes,
prometheus.GaugeValue,
d.VirtualBytes,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.VirtualBytesPeak,
prometheus.GaugeValue,
d.VirtualBytesPeak,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSet,
prometheus.GaugeValue,
d.WorkingSet,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.WorkingSetPeak,
prometheus.GaugeValue,
d.WorkingSetPeak,
d.Name,
)
}
return nil, nil
}
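// perflibRemoteDesktopConnectionBrokerCounterset maps the connection counters read from the "Remote Desktop Connection Broker Counterset" perflib object.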
type perflibRemoteDesktopConnectionBrokerCounterset struct {
SuccessfulConnections float64 `perflib:"Successful Connections"`
PendingConnections float64 `perflib:"Pending Connections"`
FailedConnections float64 `perflib:"Failed Connections"`
}
func (c *TerminalServicesCollector) collectCollectionBrokerPerformanceCounter(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
dst := make([]perflibRemoteDesktopConnectionBrokerCounterset, 0)
err := unmarshalObject(ctx.perfObjects["Remote Desktop Connection Broker Counterset"], &dst)
if err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.ConnectionBrokerPerformance,
prometheus.CounterValue,
dst[0].SuccessfulConnections,
"Successful",
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionBrokerPerformance,
prometheus.CounterValue,
dst[0].PendingConnections,
"Pending",
)
ch <- prometheus.MustNewConstMetric(
c.ConnectionBrokerPerformance,
prometheus.CounterValue,
dst[0].FailedConnections,
"Failed",
)
return nil, nil
}

View File

@@ -6,6 +6,7 @@ This directory contains documentation of the collectors in the WMI exporter, wit
- [`adfs`](collector.adfs.md)
- [`cpu`](collector.cpu.md)
- [`cs`](collector.cs.md)
- [`dhcp`](collector.dhcp.md)
- [`dns`](collector.dns.md)
- [`hyperv`](collector.hyperv.md)
- [`iis`](collector.iis.md)
@@ -25,8 +26,10 @@ This directory contains documentation of the collectors in the WMI exporter, wit
- [`net`](collector.net.md)
- [`os`](collector.os.md)
- [`process`](collector.process.md)
- [`remote_fx`](collector.remote_fx.md)
- [`service`](collector.service.md)
- [`system`](collector.system.md)
- [`tcp`](collector.tcp.md)
- [`terminal_services`](collector.terminal_services.md)
- [`textfile`](collector.textfile.md)
- [`vmware`](collector.vmware.md)

docs/collector.dhcp.md Normal file
View File

@@ -0,0 +1,53 @@
# dhcp collector
The dhcp collector exposes DHCP Server metrics.
|||
-|-
Metric name prefix | `dhcp`
Data source | Perflib
Classes | `DHCP Server`
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`packets_received_total` | Total number of packets received by the DHCP server | counter | None
`duplicates_dropped_total` | Total number of duplicate packets received by the DHCP server | counter | None
`packets_expired_total` | Total number of packets expired in the DHCP server message queue | counter | None
`active_queue_length` | Number of packets in the processing queue of the DHCP server | gauge | None
`conflict_check_queue_length` | Number of packets in the DHCP server queue waiting on conflict detection (ping) | gauge | None
`discovers_total` | Total DHCP Discovers received by the DHCP server | counter | None
`offers_total` | Total DHCP Offers sent by the DHCP server | counter | None
`requests_total` | Total DHCP Requests received by the DHCP server | counter | None
`informs_total` | Total DHCP Informs received by the DHCP server | counter | None
`acks_total` | Total DHCP Acks sent by the DHCP server | counter | None
`nacks_total` | Total DHCP Nacks sent by the DHCP server | counter | None
`declines_total` | Total DHCP Declines received by the DHCP server | counter | None
`releases_total` | Total DHCP Releases received by the DHCP server | counter | None
`offer_queue_length` | Number of packets in the offer queue of the DHCP server | gauge | None
`denied_due_to_match_total` | Total number of DHCP requests denied, based on matches from the Deny List | gauge | None
`denied_due_to_nonmatch_total` | Total number of DHCP requests denied, based on non-matches from the Allow List | gauge | None
`failover_bndupd_sent_total` | Number of DHCP failover Binding Update messages sent | counter | None
`failover_bndupd_received_total` | Number of DHCP failover Binding Update messages received | counter | None
`failover_bndack_sent_total` | Number of DHCP failover Binding Ack messages sent | counter | None
`failover_bndack_received_total` | Number of DHCP failover Binding Ack messages received | counter | None
`failover_bndupd_pending_in_outbound_queue` | Number of pending outbound DHCP failover Binding Update messages | counter | None
`failover_transitions_communicationinterrupted_state_total` | Total number of transitions into COMMUNICATION INTERRUPTED state | counter | None
`failover_transitions_partnerdown_state_total` | Total number of transitions into PARTNER DOWN state | counter | None
`failover_transitions_recover_total` | Total number of transitions into RECOVER state | counter | None
`failover_bndupd_dropped_total` | Total number of DHCP failover Binding Updates dropped | counter | None
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -0,0 +1,67 @@
# exchange collector
The exchange collector collects metrics from MS Exchange hosts through perflib.
|||
-|-
Metric name prefix | `exchange`
Classes | [Win32_PerfRawData_MSExchangeADAccess_MSExchangeADAccessProcesses](https://docs.microsoft.com/en-us/exchange/)<br/> [Win32_PerfRawData_MSExchangeTransportQueues_MSExchangeTransportQueues](https://docs.microsoft.com/en-us/exchange/)<br/> [Win32_PerfRawData_ESE_MSExchangeDatabaseInstances](https://docs.microsoft.com/en-us/exchange/)<br/> [Win32_PerfRawData_MSExchangeHttpProxy_MSExchangeHttpProxy](https://docs.microsoft.com/en-us/exchange/)<br/> [Win32_PerfRawData_MSExchangeActiveSync_MSExchangeActiveSync](https://docs.microsoft.com/en-us/exchange/)<br/> [Win32_PerfRawData_MSExchangeAvailabilityService_MSExchangeAvailabilityService](https://docs.microsoft.com/en-us/exchange/)<br/> [Win32_PerfRawData_MSExchangeOWA_MSExchangeOWA](https://docs.microsoft.com/en-us/exchange/)<br/> [Win32_PerfRawData_MSExchangeAutodiscover_MSExchangeAutodiscover](https://docs.microsoft.com/en-us/exchange/)<br/> [Win32_PerfRawData_MSExchangeWorkloadManagementWorkloads_MSExchangeWorkloadManagementWorkloads](https://docs.microsoft.com/en-us/exchange/)<br/> [Win32_PerfRawData_MSExchangeRpcClientAccess_MSExchangeRpcClientAccess](https://docs.microsoft.com/en-us/exchange/)<br/>
Enabled by default? | No
## Flags
### `--collectors.exchange.list`
Lists the Perflib objects that are queried for data, along with the perflib object ID
## Metrics
Name | Description
--------------|---------------
`wmi_exchange_rpc_avg_latency_sec` | The latency (sec), averaged for the past 1024 packets
`wmi_exchange_rpc_requests` | Number of client requests currently being processed by the RPC Client Access service
`wmi_exchange_rpc_active_user_count` | Number of unique users that have shown some kind of activity in the last 2 minutes
`wmi_exchange_rpc_connection_count` | Total number of client connections maintained
`wmi_exchange_rpc_operations_total` | The rate at which RPC operations occur
`wmi_exchange_rpc_user_count` | Number of users
`wmi_exchange_ldap_read_time_sec` | Time (sec) to send an LDAP read request and receive a response
`wmi_exchange_ldap_search_time_sec` | Time (sec) to send an LDAP search request and receive a response
`wmi_exchange_ldap_write_time_sec` | Time (sec) to send an LDAP Add/Modify/Delete request and receive a response
`wmi_exchange_ldap_timeout_errors_total` | Total number of LDAP timeout errors
`wmi_exchange_ldap_long_running_ops_per_sec` | Long Running LDAP operations per second
`wmi_exchange_transport_queues_external_active_remote_delivery` | External Active Remote Delivery Queue length
`wmi_exchange_transport_queues_internal_active_remote_delivery` | Internal Active Remote Delivery Queue length
`wmi_exchange_transport_queues_active_mailbox_delivery` | Active Mailbox Delivery Queue length
`wmi_exchange_transport_queues_retry_mailbox_delivery` | Retry Mailbox Delivery Queue length
`wmi_exchange_transport_queues_unreachable` | Unreachable Queue length
`wmi_exchange_transport_queues_external_largest_delivery` | External Largest Delivery Queue length
`wmi_exchange_transport_queues_internal_largest_delivery` | Internal Largest Delivery Queue length
`wmi_exchange_transport_queues_poison` | Poison Queue length
`wmi_exchange_http_proxy_mailbox_server_locator_avg_latency_sec` | Average latency (sec) of MailboxServerLocator web service calls
`wmi_exchange_http_proxy_avg_auth_latency` | Average time spent authenticating CAS requests over the last 200 samples
`wmi_exchange_http_proxy_outstanding_proxy_requests` | Number of concurrent outstanding proxy requests
`wmi_exchange_http_proxy_requests_total` | Number of proxy requests processed each second
`wmi_exchange_avail_service_requests_per_sec` | Number of requests serviced per second
`wmi_exchange_owa_current_unique_users` | Number of unique users currently logged on to Outlook Web App
`wmi_exchange_owa_requests_total` | Number of requests handled by Outlook Web App per second
`wmi_exchange_autodiscover_requests_total` | Number of autodiscover service requests processed each second
`wmi_exchange_workload_active_tasks` | Number of active tasks currently running in the background for workload management
`wmi_exchange_workload_completed_tasks` | Number of workload management tasks that have been completed
`wmi_exchange_workload_queued_tasks` | Number of workload management tasks that are currently queued up waiting to be processed
`wmi_exchange_workload_yielded_tasks` | The total number of tasks that have been yielded by a workload
`wmi_exchange_workload_is_active` | Active indicates whether the workload is in an active (1) or paused (0) state
`wmi_exchange_activesync_requests_total` | Number of HTTP requests received from the client via ASP.NET per second. Indicates current user load
`wmi_exchange_http_proxy_avg_cas_proccessing_latency_sec` | Average latency (sec) of CAS processing time over the last 200 requests
`wmi_exchange_http_proxy_mailbox_proxy_failure_rate` | Percentage of failures between this CAS and MBX servers over the last 200 samples
`wmi_exchange_activesync_ping_cmds_pending` | Number of ping commands currently pending in the queue
`wmi_exchange_activesync_sync_cmds_total` | Number of sync commands processed per second. Clients use this command to synchronize items within a folder
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -26,9 +26,17 @@ metrics.
### Example
To match all firefox processes: `--collector.process.whitelist="firefox.+"`.
Note that multiple processes with the same name will be disambiguated by
Windows by adding a number suffix, such as `firefox#2`. Your regexp must take
Windows by adding a number suffix, such as `firefox#2`. Your [regexp](https://en.wikipedia.org/wiki/Regular_expression) must take
these suffixes into consideration.
:warning: The regexp is case-sensitive, so `--collector.process.whitelist="FIREFOX.+"` will **NOT** match a process named `firefox`.
To specify multiple names, use the pipe `|` character:
```
--collector.process.whitelist="firefox.+|FIREFOX.+|chrome.+"
```
This will match all processes named `firefox`, `FIREFOX` or `chrome`.
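The flag is evaluated as a Go regular expression, so matching is case-sensitive unless the pattern says otherwise. The standalone sketch below (illustrative only, not exporter code) shows this behaviour, including the `(?i)` modifier that Go regexps accept for case-insensitive matching:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Case-sensitive by default: "FIREFOX.+" does not match a lowercase process name.
	re := regexp.MustCompile("FIREFOX.+")
	fmt.Println(re.MatchString("firefox#2")) // false
	fmt.Println(re.MatchString("FIREFOX#2")) // true

	// The (?i) flag makes the whole pattern case-insensitive.
	ci := regexp.MustCompile("(?i)firefox.+")
	fmt.Println(ci.MatchString("firefox#2")) // true
	fmt.Println(ci.MatchString("FIREFOX#2")) // true
}
```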
## Metrics
Name | Description | Type | Labels

View File

@@ -0,0 +1,51 @@
# remote_fx collector
The remote_fx collector exposes Performance Counters regarding the RemoteFX protocol (RDP). It exposes both network and graphics related performance counters.
|||
-|-
Metric name prefix | `remote_fx`
Data source | Perflib
Classes | [`Win32_PerfRawData_Counters_RemoteFXNetwork`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxnetwork/), [`Win32_PerfRawData_Counters_RemoteFXGraphics`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxgraphics), [more info...](https://docs.microsoft.com/en-us/azure/virtual-desktop/remotefx-graphics-performance-counters)
Enabled by default? | No
## Flags
None
## Metrics (Network)
Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_remote_fx_net_base_udp_rtt_seconds` | Base UDP round-trip time (RTT) detected in seconds. | gauge | `session_name`
`wmi_remote_fx_net_base_tcp_rtt_seconds` | Base TCP round-trip time (RTT) detected in seconds. | gauge | `session_name`
`wmi_remote_fx_net_current_tcp_bandwidth` | TCP Bandwidth detected in bytes per second. | gauge | `session_name`
`wmi_remote_fx_net_current_tcp_rtt_seconds` | Average TCP round-trip time (RTT) detected in seconds. | gauge | `session_name`
`wmi_remote_fx_net_current_udp_bandwidth` | UDP Bandwidth detected in bytes per second. | gauge | `session_name`
`wmi_remote_fx_net_current_udp_rtt_seconds` | Average UDP round-trip time (RTT) detected in seconds. | gauge | `session_name`
`wmi_remote_fx_net_received_bytes_total` | _Not yet documented_ | counter | `session_name`
`wmi_remote_fx_net_sent_bytes_total` | _Not yet documented_ | counter | `session_name`
`wmi_remote_fx_net_udp_packets_received_total` | Rate in packets per second at which packets are received over UDP. | counter | `session_name`
`wmi_remote_fx_net_udp_packets_sent_total` | Rate in packets per second at which packets are sent over UDP. | counter | `session_name`
## Metrics (Graphics)
Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_remote_fx_gfx_average_encoding_time_seconds` | Average frame encoding time. | gauge | `session_name`
`wmi_remote_fx_gfx_frame_quality` | Quality of the output frame expressed as a percentage of the quality of the source frame. | gauge | `session_name`
`wmi_remote_fx_gfx_frames_skipped_insufficient_resource_total` | Number of frames skipped per second due to insufficient resources. Resources are client, server or network. | counter | `session_name`, `resource`
`wmi_remote_fx_gfx_graphics_compression_ratio` | Ratio of the number of bytes encoded to the number of bytes input. | gauge | `session_name`
`wmi_remote_fx_gfx_input_frames_total` | Number of source frames provided as input to RemoteFX graphics per second. | counter | `session_name`
`wmi_remote_fx_gfx_output_frames_total` | Number of frames sent to the client per second. | counter | `session_name`
`wmi_remote_fx_gfx_source_frames_total` | Number of frames composed by the source (DWM) per second. | counter | `session_name`
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -20,6 +20,7 @@ Example: `--collector.service.services-where="Name='wmi_exporter'"`
Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_service_info` | Contains service information in labels, constant 1 | gauge | name, display_name, process_id
`wmi_service_state` | The state of the service, 1 if the current state, 0 otherwise | gauge | name, state
`wmi_service_start_mode` | The start mode of the service, 1 if the current start mode, 0 otherwise | gauge | name, start_mode
`wmi_service_status` | The status of the service, 1 if the current status, 0 otherwise | gauge | name, status

View File

@@ -0,0 +1,48 @@
# terminal_services collector
The terminal_services collector exposes terminal services (Remote Desktop Services) performance metrics.
|||
-|-
Metric name prefix | `terminal_services`
Data source | Perflib/WMI
Classes | [`Win32_PerfRawData_LocalSessionManager_TerminalServices`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_localsessionmanager_terminalservices/), [`Win32_PerfRawData_TermService_TerminalServicesSession`](https://docs.microsoft.com/en-us/previous-versions/aa394344(v%3Dvs.85)), [`Win32_PerfRawData_RemoteDesktopConnectionBrokerPerformanceCounterProvider_RemoteDesktopConnectionBrokerCounterset`](https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-r2-and-2012/mt729067(v%3Dws.11))
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_terminal_services_local_session_count` | Number of local Terminal Services sessions. | gauge | `session`
`wmi_terminal_services_connection_broker_performance_total`* | The total number of connections handled by the Connection Brokers since the service started. | counter | `connection`
`wmi_terminal_services_handle_count` | Total number of handles currently opened by this process. This number is the sum of the handles currently opened by each thread in this process. | gauge | `session_name`
`wmi_terminal_services_page_fault_total` | Rate at which page faults occur in the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. The page may not be retrieved from disk if it is on the standby list and therefore already in main memory. The page also may not be retrieved if it is in use by another process which shares the page. | counter | `session_name`
`wmi_terminal_services_page_file_bytes` | Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `session_name`
`wmi_terminal_services_page_file_bytes_peak` | Maximum number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `session_name`
`wmi_terminal_services_privileged_time_seconds_total` | Total elapsed time that the threads of the process have spent executing code in privileged mode. | counter | `session_name`
`wmi_terminal_services_processor_time_seconds_total` | Total elapsed time that all of the threads of this process used the processor to execute instructions. | counter | `session_name`
`wmi_terminal_services_user_time_seconds_total` | Total elapsed time that this process's threads have spent executing code in user mode. Applications, environment subsystems, and integral subsystems execute in user mode. | counter | `session_name`
`wmi_terminal_services_pool_non_paged_bytes` | Number of bytes in the non-paged pool, an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. This property displays the last observed value only; it is not an average. | gauge | `session_name`
`wmi_terminal_services_pool_paged_bytes` | Number of bytes in the paged pool, an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. This property displays the last observed value only; it is not an average. | gauge | `session_name`
`wmi_terminal_services_private_bytes` | Current number of bytes this process has allocated that cannot be shared with other processes. | gauge | `session_name`
`wmi_terminal_services_thread_count` | Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread. | gauge | `session_name`
`wmi_terminal_services_virtual_bytes` | Current size, in bytes, of the virtual address space the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries. | gauge | `session_name`
`wmi_terminal_services_virtual_bytes_peak` | Maximum number of bytes of virtual address space the process has used at any one time. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process might limit its ability to load libraries. | gauge | `session_name`
`wmi_terminal_services_working_set_bytes` | Current number of bytes in the working set of this process. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `session_name`
`wmi_terminal_services_working_set_bytes_peak` | Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `session_name`
\* `wmi_terminal_services_connection_broker_performance_total` is only collected if the server has the `Remote Desktop Connection Broker` role.
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

go.sum
View File

@@ -4,15 +4,13 @@ github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
@@ -22,61 +20,44 @@ github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.0.0 h1:lsek0oXi8iFE9L+EXARyHIjU5rlWIhhTkjDz3vHhWWQ=
github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/leoluk/perflib_exporter v0.0.1 h1:MRwP1Ohh/mVevUy4ZUzvSlxnJtm9/NWHeM3aROxnRiQ=
github.com/leoluk/perflib_exporter v0.0.1/go.mod h1:4APOQriqHobMzovXV7guPQv0ynKH6vZD3XNmT2MBc6w=
github.com/leoluk/perflib_exporter v0.1.0 h1:fXe/mDaf9jR+Zk8FjFlcCSksACuIj2VNN4GyKHmQqtA=
github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM=
github.com/matttproud/golang_protobuf_extensions v1.0.0 h1:YNOwxxSJzSUARoD9KRZLzM9Y858MNGCOACTvCW9TSAc=
github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180312112859-e4aa40a9169a h1:JLXgXKi9RCmLk8DMn8+PCvN++iwpD3KptUbVvHBsKtU=
github.com/prometheus/common v0.0.0-20180312112859-e4aa40a9169a/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20180310141954-54d17b57dd7d h1:iF+U2tTdys559fmqt0MNaC8QLIJh1twxIIOylDGhswM=
github.com/prometheus/procfs v0.0.0-20180310141954-54d17b57dd7d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20180312195533-182114d58262 h1:1NLVUmR8SQ7cNNA5Vo7ronpXbR+5A+9IwIC/bLE7D8Y=
golang.org/x/crypto v0.0.0-20180312195533-182114d58262/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180313075820-8c0ece68c283 h1:DE/w7won1Ns86VoWjUZ4cJS6//TObJntGkxuZ63asRc=
golang.org/x/sys v0.0.0-20180313075820-8c0ece68c283/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998=

View File

@@ -11,10 +11,10 @@ Param(
$ErrorActionPreference = "Stop"
if($Credential -ne $null) {
$wmiObject = Get-WMIObject -ComputerName $ComputerName -Credential $Credential -Class $Class
$wmiObject = Get-CimInstance -ComputerName $ComputerName -Credential $Credential -Class $Class
}
else {
$wmiObject = Get-WMIObject -ComputerName $ComputerName -Class $Class
$wmiObject = Get-CimInstance -ComputerName $ComputerName -Class $Class
}
$members = $wmiObject `

View File

@@ -5,7 +5,7 @@ import (
"github.com/prometheus/common/log"
)
func init() {
Factories["{{ .CollectorName | toLower }}"] = New{{ .CollectorName }}Collector
registerCollector("{{ .CollectorName | toLower }}", new{{ .CollectorName }}Collector) // TODO: Add any perflib dependencies here
}
// A {{ .CollectorName }}Collector is a Prometheus collector for WMI {{ .Class }} metrics
type {{ .CollectorName }}Collector struct {
@@ -13,8 +13,8 @@ type {{ .CollectorName }}Collector struct {
{{ $m.Name }} *prometheus.Desc
{{- end }}
}
// New{{ .CollectorName }}Collector ...
func New{{ .CollectorName }}Collector() (Collector, error) {
func new{{ .CollectorName }}Collector() (Collector, error) {
const subsystem = "{{ .CollectorName | toLower }}"
return &{{ .CollectorName }}Collector{
{{- range $m := .Members }}
@@ -27,15 +27,7 @@ func New{{ .CollectorName }}Collector() (Collector, error) {
{{- end }}
}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *{{ .CollectorName }}Collector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting {{ .CollectorName | toLower }} metrics:", desc, err)
return err
}
return nil
}
// {{ .Class }} docs:
// - <add link to documentation here>
type {{ .Class }} struct {
@@ -44,7 +36,10 @@ type {{ .Class }} struct {
{{ $m.Name }} {{ $m.Type }}
{{- end }}
}
func (c *{{ .CollectorName }}Collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *{{ .CollectorName }}Collector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []{{ .Class }}
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {