Compare commits

...

16 Commits

Author SHA1 Message Date
Jan-Otto Kröpke
bf6688c1fb service: [0.28.x] fix panic with NewWithConfig code path (#1647)
Co-authored-by: Erik Baranowski <erik.r.baranowski@gmail.com>
2024-09-28 10:53:41 +02:00
Jan-Otto Kröpke
9febf1e97c process: fix Access is denied errors (#1599) 2024-08-31 19:37:19 +02:00
Jan-Otto Kröpke
b53d91e048 cpu_info: Extend processor information (#1597) 2024-08-31 12:22:22 +02:00
Jan-Otto Kröpke
a5d1112fcd cache: fix windows_cache_copy_read_hits_total metric (#1591) 2024-08-31 08:25:37 +02:00
Jan-Otto Kröpke
d8f0665bdc process: introduce info metric; removed creating_process_id label from all process metric (click here for more information) (#1592) 2024-08-30 20:19:41 +02:00
Jan-Otto Kröpke
4f6e6e8b77 *: avoid using default wmi client. (#1590) 2024-08-30 00:26:15 +02:00
Jan-Otto Kröpke
3ce25ff1ef mscluster: merge multiple collector into one (Click here for more information) (#1585) 2024-08-29 22:03:05 +02:00
dependabot[bot]
c99ac2c5aa chore(deps): bump github.com/prometheus/client_golang (#1588)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-27 21:12:58 +02:00
dependabot[bot]
c9510e8c1d chore(deps): bump github.com/Microsoft/hcsshim from 0.12.5 to 0.12.6 (#1587)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-27 21:12:34 +02:00
Jan-Otto Kröpke
4484e96b66 Update bug_report.yaml
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
2024-08-27 20:33:11 +02:00
Jan-Otto Kröpke
9705ff0a24 Update bug_report.yaml
Signed-off-by: Jan-Otto Kröpke <github@jkroepke.de>
2024-08-27 20:32:05 +02:00
Jan-Otto Kröpke
89c832feb0 chore: pass context aware logger to collectors (#1582) 2024-08-24 19:14:38 +02:00
Jan-Otto Kröpke
fbead0fb79 terminal_services: remove old metric from docs (#1583) 2024-08-24 16:57:34 +02:00
Jan-Otto Kröpke
0f4ef8e781 chore: Update TEXTFILE_DIRS description in README (#1581) 2024-08-22 08:49:10 +02:00
Jan-Otto Kröpke
56709b9638 terminal_services: Avoid duplicate metrics by add session_id (#1576) 2024-08-20 19:24:08 +02:00
dependabot[bot]
2589e56f95 chore(deps): bump github.com/prometheus/client_golang from 1.19.1 to 1.20.0 (#1574)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-19 17:23:39 +02:00
97 changed files with 3282 additions and 3560 deletions

View File

@@ -6,6 +6,27 @@ body:
     attributes:
       value: Thanks for taking the time to fill out this bug report!
+  - type: markdown
+    attributes:
+      value: |-
+        > [!NOTE]
+        > If you encounter "Counter not found" issues, try to re-build the performance counter first.
+        ```
+        PS C:\WINDOWS\system32> cd c:\windows\system32
+        PS C:\windows\system32> lodctr /R
+        Error: Unable to rebuild performance counter setting from system backup store, error code is 2
+        PS C:\windows\system32> cd ..
+        PS C:\windows> cd syswow64
+        PS C:\windows\syswow64> lodctr /R
+        Info: Successfully rebuilt performance counter setting from system backup store
+        PS C:\windows\syswow64> winmgmt.exe /RESYNCPERF
+        ```
+        ----
   - type: textarea
     attributes:
       label: Current Behavior
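For reference, once the counters have been rebuilt as described in the note above, a quick way to confirm that a counter set is registered again is to list it. The counter set name below is only an example:

```
PS C:\> Get-Counter -ListSet "Processor Information"
PS C:\> (Get-Counter -ListSet "Processor Information").Paths
```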

View File

@@ -100,5 +100,5 @@ jobs:
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v6
         with:
-          version: v1.59
-          args: "--timeout=5m"
+          version: v1.60
+          args: "--timeout=5m --max-same-issues=0"

View File

@@ -10,6 +10,7 @@ linters:
     - err113
     - exhaustive
     - exhaustruct
+    - exportloopref
     - fatcontext
     - funlen
     - gochecknoglobals
@@ -33,6 +34,10 @@ linters:
     - maintidx
 linters-settings:
+  gosec:
+    excludes:
+      - G115 # integer overflow conversion
   gci:
     sections:
       - prefix(github.com/prometheus-community/windows_exporter/pkg/initiate)
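For context, gosec's G115 rule flags integer conversions that may overflow, which come up frequently when narrowing 64-bit counter values. A minimal Go illustration of the pattern being excluded (not code from this repository):

```go
package main

import "fmt"

func main() {
	var raw uint64 = 5_000_000_000 // e.g. a raw 64-bit performance counter value

	// gosec G115: conversion from uint64 to uint32 may overflow.
	truncated := uint32(raw)

	// Converting to float64, as Prometheus collectors usually do, avoids the finding.
	value := float64(raw)

	fmt.Println(truncated, value)
}
```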

View File

@@ -14,7 +14,7 @@ Name | Description | Enabled by default
 [cache](docs/collector.cache.md) | Cache metrics |
 [cpu](docs/collector.cpu.md) | CPU usage | &#10003;
 [cpu_info](docs/collector.cpu_info.md) | CPU Information |
-[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | &#10003;
+[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) |
 [container](docs/collector.container.md) | Container metrics |
 [diskdrive](docs/collector.diskdrive.md) | Diskdrive metrics |
 [dfsr](docs/collector.dfsr.md) | DFSR metrics |
@@ -28,11 +28,7 @@ Name | Description | Enabled by default
 [logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | &#10003;
 [logon](docs/collector.logon.md) | User logon sessions |
 [memory](docs/collector.memory.md) | Memory usage metrics |
-[mscluster_cluster](docs/collector.mscluster_cluster.md) | MSCluster cluster metrics |
-[mscluster_network](docs/collector.mscluster_network.md) | MSCluster network metrics |
-[mscluster_node](docs/collector.mscluster_node.md) | MSCluster Node metrics |
-[mscluster_resource](docs/collector.mscluster_resource.md) | MSCluster Resource metrics |
-[mscluster_resourcegroup](docs/collector.mscluster_resourcegroup.md) | MSCluster ResourceGroup metrics |
+[mscluster](docs/collector.mscluster.md) | MSCluster metrics |
 [msmq](docs/collector.msmq.md) | MSMQ queues |
 [mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
 [netframework_clrexceptions](docs/collector.netframework_clrexceptions.md) | .NET Framework CLR Exceptions |
@@ -104,17 +100,17 @@ Each release provides a .msi installer. The installer will setup the windows_exp
 If the installer is run without any parameters, the exporter will run with default settings for enabled collectors, ports, etc. The following parameters are available:
 | Name | Description |
 |----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | `ENABLED_COLLECTORS` | As the `--collectors.enabled` flag, provide a comma-separated list of enabled collectors |
 | `LISTEN_ADDR` | The IP address to bind to. Defaults to an empty string. (any local address) |
 | `LISTEN_PORT` | The port to bind to. Defaults to `9182`. |
 | `METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics` |
-| `TEXTFILE_DIRS` | As the `--collector.textfile.directories` flag, provide a directory to read text files with metrics from |
+| `TEXTFILE_DIRS` | Use the `--collector.textfile.directories` flag to specify one or more directories, separated by commas, where the collector should read text files containing metrics |
 | `REMOTE_ADDR` | Allows setting comma separated remote IP addresses for the Windows Firewall exception (allow list). Defaults to an empty string (any remote address). |
 | `EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string. |
 | `ADD_FIREWALL_EXCEPTION` | Setup an firewall exception for windows_exporter. Defaults to `yes`. |
 | `ENABLE_V1_PERFORMANCE_COUNTERS` | Enables V1 performance counter on modern systems. Defaults to `yes`. |
 Parameters are sent to the installer via `msiexec`. Example invocations:
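The example invocations themselves fall outside this hunk; a representative one (MSI file name and collector list are illustrative) looks like:

```
msiexec /i windows_exporter-0.29.0-amd64.msi ENABLED_COLLECTORS=os,cpu,cpu_info,memory LISTEN_PORT=9182
```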

View File

@@ -21,11 +21,7 @@ This directory contains documentation of the collectors in the windows_exporter,
 - [`logical_disk`](collector.logical_disk.md)
 - [`logon`](collector.logon.md)
 - [`memory`](collector.memory.md)
-- [`mscluster_cluster`](collector.mscluster_cluster.md)
-- [`mscluster_network`](collector.mscluster_network.md)
-- [`mscluster_node`](collector.mscluster_node.md)
-- [`mscluster_resource`](collector.mscluster_resource.md)
-- [`mscluster_resourcegroup`](collector.mscluster_resourcegroup.md)
+- [`mscluster`](collector.mscluster.md)
 - [`msmq`](collector.msmq.md)
 - [`mssql`](collector.mssql.md)
 - [`net`](collector.net.md)

View File

@@ -15,13 +15,39 @@ None
 ## Metrics
-Name | Description | Type | Labels
------|-------------|------|-------
-`windows_cpu_info` | Labelled CPU information | gauge | `architecture`, `device_id`, `description`, `family`, `l2_cache_size` `l3_cache_size`, `name`
+| Name | Description | Type | Labels |
+|--------------------------------------------|--------------------------------------|-------|--------------------------------------------------------------|
+| `windows_cpu_info` | Labelled CPU information | gauge | `architecture`, `description`, `device_id`, `family`, `name` |
+| `windows_cpu_info_core` | Number of cores per CPU | gauge | `device_id` |
+| `windows_cpu_info_enabled_core` | Number of enabled cores per CPU | gauge | `device_id` |
+| `windows_cpu_info_l2_cache_size` | Size of L2 cache per CPU | gauge | `device_id` |
+| `windows_cpu_info_l3_cache_size` | Size of L3 cache per CPU | gauge | `device_id` |
+| `windows_cpu_info_logical_processor` | Number of logical processors per CPU | gauge | `device_id` |
+| `windows_cpu_info_thread` | Number of threads per CPU | gauge | `device_id` |
 ### Example metric
 ```
-windows_cpu_info{architecture="9",description="AMD64 Family 23 Model 49 Stepping 0",device_id="CPU0",family="107",l2_cache_size="32768",l3_cache_size="262144",name="AMD EPYC 7702P 64-Core Processor"} 1
+# HELP windows_cpu_info Labelled CPU information as provided by Win32_Processor
+# TYPE windows_cpu_info gauge
+windows_cpu_info{architecture="9",description="AMD64 Family 25 Model 33 Stepping 2",device_id="CPU0",family="107",name="AMD Ryzen 9 5900X 12-Core Processor"} 1
+# HELP windows_cpu_info_core Number of cores per CPU
+# TYPE windows_cpu_info_core gauge
+windows_cpu_info_core{device_id="CPU0"} 12
+# HELP windows_cpu_info_enabled_core Number of enabled cores per CPU
+# TYPE windows_cpu_info_enabled_core gauge
+windows_cpu_info_enabled_core{device_id="CPU0"} 12
+# HELP windows_cpu_info_l2_cache_size Size of L2 cache per CPU
+# TYPE windows_cpu_info_l2_cache_size gauge
+windows_cpu_info_l2_cache_size{device_id="CPU0"} 6144
+# HELP windows_cpu_info_l3_cache_size Size of L3 cache per CPU
+# TYPE windows_cpu_info_l3_cache_size gauge
+windows_cpu_info_l3_cache_size{device_id="CPU0"} 65536
+# HELP windows_cpu_info_logical_processor Number of logical processors per CPU
+# TYPE windows_cpu_info_logical_processor gauge
+windows_cpu_info_logical_processor{device_id="CPU0"} 24
+# HELP windows_cpu_info_thread Number of threads per CPU
+# TYPE windows_cpu_info_thread gauge
+windows_cpu_info_thread{device_id="CPU0"} 24
 ```
 The value of the metric is irrelevant, but the labels expose some useful information on the CPU installed in each socket.
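With the counts and cache sizes split into their own series, they can be aggregated directly or joined back onto `windows_cpu_info` by `device_id`; for example (PromQL, using only the label names documented above):

```
# total physical cores across all sockets
sum(windows_cpu_info_core)

# L3 cache size per socket, annotated with the CPU model name
windows_cpu_info_l3_cache_size * on(device_id) group_left(name) windows_cpu_info
```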

docs/collector.mscluster.md (new file, 186 lines)
View File

@@ -0,0 +1,186 @@
# mscluster collector
The mscluster collector exposes metrics from the `MSCluster_Cluster`, `MSCluster_Network`, `MSCluster_Node`, `MSCluster_Resource`, and `MSCluster_ResourceGroup` WMI classes, which together describe a Windows failover cluster and its networks, nodes, resources, and resource groups.
|||
-|-
Metric name prefix | `mscluster`
Classes | `MSCluster_Cluster`,`MSCluster_Network`,`MSCluster_Node`,`MSCluster_Resource`,`MSCluster_ResourceGroup`
Enabled by default? | No
## Flags
### `--collectors.mscluster.enabled`
Comma-separated list of collectors to use, for example:
`--collectors.mscluster.enabled=cluster,network,node,resource,resourcegroup`.
Matching is case-sensitive.
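As a sketch of a full command line (the collector list is illustrative), enabling the mscluster collector with only the cluster and node sub-collectors could look like:

```
windows_exporter.exe --collectors.enabled="cpu,mscluster" --collectors.mscluster.enabled="cluster,node"
```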
## Metrics
### Cluster
| Name | Description | Type | Labels |
|-------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|--------|
| `mscluster_cluster_AddEvictDelay` | Provides access to the cluster's AddEvictDelay property, which is the number of seconds that a new node is delayed after an eviction of another node. | gauge | `name` |
| `mscluster_cluster_AdminAccessPoint` | The type of the cluster administrative access point. | gauge | `name` |
| `mscluster_cluster_AutoAssignNodeSite` | Determines whether or not the cluster will attempt to automatically assign nodes to sites based on networks and Active Directory Site information. | gauge | `name` |
| `mscluster_cluster_AutoBalancerLevel` | Determines the level of aggressiveness of AutoBalancer. | gauge | `name` |
| `mscluster_cluster_AutoBalancerMode` | Determines whether or not the auto balancer is enabled. | gauge | `name` |
| `mscluster_cluster_BackupInProgress` | Indicates whether a backup is in progress. | gauge | `name` |
| `mscluster_cluster_BlockCacheSize` | CSV BlockCache Size in MB. | gauge | `name` |
| `mscluster_cluster_ClusSvcHangTimeout` | Controls how long the cluster network driver waits between Failover Cluster Service heartbeats before it determines that the Failover Cluster Service has stopped responding. | gauge | `name` |
| `mscluster_cluster_ClusSvcRegroupOpeningTimeout` | Controls how long a node will wait on other nodes in the opening stage before deciding that they failed. | gauge | `name` |
| `mscluster_cluster_ClusSvcRegroupPruningTimeout` | Controls how long the membership leader will wait to reach full connectivity between cluster nodes. | gauge | `name` |
| `mscluster_cluster_ClusSvcRegroupStageTimeout` | Controls how long a node will wait on other nodes in a membership stage before deciding that they failed. | gauge | `name` |
| `mscluster_cluster_ClusSvcRegroupTickInMilliseconds` | Controls how frequently the membership algorithm is sending periodic membership messages. | gauge | `name` |
| `mscluster_cluster_ClusterEnforcedAntiAffinity` | Enables or disables hard enforcement of group anti-affinity classes. | gauge | `name` |
| `mscluster_cluster_ClusterFunctionalLevel` | The functional level the cluster is currently running in. | gauge | `name` |
| `mscluster_cluster_ClusterGroupWaitDelay` | Maximum time in seconds that a group waits for its preferred node to come online during cluster startup before coming online on a different node. | gauge | `name` |
| `mscluster_cluster_ClusterLogLevel` | Controls the level of cluster logging. | gauge | `name` |
| `mscluster_cluster_ClusterLogSize` | Controls the maximum size of the cluster log files on each of the nodes. | gauge | `name` |
| `mscluster_cluster_ClusterUpgradeVersion` | Specifies the upgrade version the cluster is currently running in. | gauge | `name` |
| `mscluster_cluster_CrossSiteDelay` | Controls how long the cluster network driver waits in milliseconds between sending Cluster Service heartbeats across sites. | gauge | `name` |
| `mscluster_cluster_CrossSiteThreshold` | Controls how many Cluster Service heartbeats can be missed across sites before it determines that Cluster Service has stopped responding. | gauge | `name` |
| `mscluster_cluster_CrossSubnetDelay` | Controls how long the cluster network driver waits in milliseconds between sending Cluster Service heartbeats across subnets. | gauge | `name` |
| `mscluster_cluster_CrossSubnetThreshold` | Controls how many Cluster Service heartbeats can be missed across subnets before it determines that Cluster Service has stopped responding. | gauge | `name` |
| `mscluster_cluster_CsvBalancer` | Whether automatic balancing for CSV is enabled. | gauge | `name` |
| `mscluster_cluster_DatabaseReadWriteMode` | Sets the database read and write mode. | gauge | `name` |
| `mscluster_cluster_DefaultNetworkRole` | Provides access to the cluster's DefaultNetworkRole property. | gauge | `name` |
| `mscluster_cluster_DetectedCloudPlatform` | | gauge | `name` |
| `mscluster_cluster_DetectManagedEvents` | | gauge | `name` |
| `mscluster_cluster_DetectManagedEventsThreshold` | | gauge | `name` |
| `mscluster_cluster_DisableGroupPreferredOwnerRandomization` | | gauge | `name` |
| `mscluster_cluster_DrainOnShutdown` | Whether to drain the node when cluster service is being stopped. | gauge | `name` |
| `mscluster_cluster_DynamicQuorumEnabled` | Allows cluster service to adjust node weights as needed to increase availability. | gauge | `name` |
| `mscluster_cluster_EnableSharedVolumes` | Enables or disables cluster shared volumes on this cluster. | gauge | `name` |
| `mscluster_cluster_FixQuorum` | Provides access to the cluster's FixQuorum property, which specifies if the cluster is in a fix quorum state. | gauge | `name` |
| `mscluster_cluster_GracePeriodEnabled` | Whether the node grace period feature of this cluster is enabled. | gauge | `name` |
| `mscluster_cluster_GracePeriodTimeout` | The grace period timeout in milliseconds. | gauge | `name` |
| `mscluster_cluster_GroupDependencyTimeout` | The timeout after which a group will be brought online despite unsatisfied dependencies | gauge | `name` |
| `mscluster_cluster_HangRecoveryAction` | Controls the action to take if the user-mode processes have stopped responding. | gauge | `name` |
| `mscluster_cluster_IgnorePersistentStateOnStartup` | Provides access to the cluster's IgnorePersistentStateOnStartup property, which specifies whether the cluster will bring online groups that were online when the cluster was shut down. | gauge | `name` |
| `mscluster_cluster_LogResourceControls` | Controls the logging of resource controls. | gauge | `name` |
| `mscluster_cluster_LowerQuorumPriorityNodeId` | Specifies the Node ID that has a lower priority when voting for quorum is performed. If the quorum vote is split 50/50%, the specified node's vote would be ignored to break the tie. If this is not set then the cluster will pick a node at random to break the tie. | gauge | `name` |
| `mscluster_cluster_MaxNumberOfNodes` | Indicates the maximum number of nodes that may participate in the Cluster. | gauge | `name` |
| `mscluster_cluster_MessageBufferLength` | The maximum unacknowledged message count for GEM. | gauge | `name` |
| `mscluster_cluster_MinimumNeverPreemptPriority` | Groups with this priority or higher cannot be preempted. | gauge | `name` |
| `mscluster_cluster_MinimumPreemptorPriority` | Minimum priority a cluster group must have to be able to preempt another group. | gauge | `name` |
| `mscluster_cluster_NetftIPSecEnabled` | Whether IPSec is enabled for cluster internal traffic. | gauge | `name` |
| `mscluster_cluster_PlacementOptions` | Various option flags to modify default placement behavior. | gauge | `name` |
| `mscluster_cluster_PlumbAllCrossSubnetRoutes` | Plumbs all possible cross subnet routes to all nodes. | gauge | `name` |
| `mscluster_cluster_PreventQuorum` | Whether the cluster will ignore group persistent state on startup. | gauge | `name` |
| `mscluster_cluster_QuarantineDuration` | The quarantine period timeout in milliseconds. | gauge | `name` |
| `mscluster_cluster_QuarantineThreshold` | Number of node failures before it will be quarantined. | gauge | `name` |
| `mscluster_cluster_QuorumArbitrationTimeMax` | Controls the maximum time necessary to decide the Quorum owner node. | gauge | `name` |
| `mscluster_cluster_QuorumArbitrationTimeMin` | Controls the minimum time necessary to decide the Quorum owner node. | gauge | `name` |
| `mscluster_cluster_QuorumLogFileSize` | This property is obsolete. | gauge | `name` |
| `mscluster_cluster_QuorumTypeValue` | Get the current quorum type value. -1: Unknown; 1: Node; 2: FileShareWitness; 3: Storage; 4: None | gauge | `name` |
| `mscluster_cluster_RequestReplyTimeout` | Controls the request reply time-out period. | gauge | `name` |
| `mscluster_cluster_ResiliencyDefaultPeriod` | The default resiliency period, in seconds, for the cluster. | gauge | `name` |
| `mscluster_cluster_ResiliencyLevel` | The resiliency level for the cluster. | gauge | `name` |
| `mscluster_cluster_ResourceDllDeadlockPeriod` | This property is obsolete. | gauge | `name` |
| `mscluster_cluster_RootMemoryReserved` | Controls the amount of memory reserved for the parent partition on all cluster nodes. | gauge | `name` |
| `mscluster_cluster_RouteHistoryLength` | The history length for routes to help finding network issues. | gauge | `name` |
| `mscluster_cluster_S2DBusTypes` | Bus types for storage spaces direct. | gauge | `name` |
| `mscluster_cluster_S2DCacheDesiredState` | Desired state of the storage spaces direct cache. | gauge | `name` |
| `mscluster_cluster_S2DCacheFlashReservePercent` | Percentage of allocated flash space to utilize when caching. | gauge | `name` |
| `mscluster_cluster_S2DCachePageSizeKBytes` | Page size in KB used by S2D cache. | gauge | `name` |
| `mscluster_cluster_S2DEnabled` | Whether direct attached storage (DAS) is enabled. | gauge | `name` |
| `mscluster_cluster_S2DIOLatencyThreshold` | The I/O latency threshold for storage spaces direct. | gauge | `name` |
| `mscluster_cluster_S2DOptimizations` | Optimization flags for storage spaces direct. | gauge | `name` |
| `mscluster_cluster_SameSubnetDelay` | Controls how long the cluster network driver waits in milliseconds between sending Cluster Service heartbeats on the same subnet. | gauge | `name` |
| `mscluster_cluster_SameSubnetThreshold` | Controls how many Cluster Service heartbeats can be missed on the same subnet before it determines that Cluster Service has stopped responding. | gauge | `name` |
| `mscluster_cluster_SecurityLevel` | Controls the level of security that should apply to intracluster messages. 0: Clear Text; 1: Sign; 2: Encrypt | gauge | `name` |
| `mscluster_cluster_SecurityLevelForStorage` | | gauge | `name` |
| `mscluster_cluster_SharedVolumeVssWriterOperationTimeout` | CSV VSS Writer operation timeout in seconds. | gauge | `name` |
| `mscluster_cluster_ShutdownTimeoutInMinutes` | The maximum time in minutes allowed for cluster resources to come offline during cluster service shutdown. | gauge | `name` |
| `mscluster_cluster_UseClientAccessNetworksForSharedVolumes` | Whether the use of client access networks for cluster shared volumes feature of this cluster is enabled. 0: Disabled; 1: Enabled; 2: Auto | gauge | `name` |
| `mscluster_cluster_WitnessDatabaseWriteTimeout` | Controls the maximum time in seconds that a cluster database write to a witness can take before the write is abandoned. | gauge | `name` |
| `mscluster_cluster_WitnessDynamicWeight` | The weight of the configured witness. | gauge | `name` |
| `mscluster_cluster_WitnessRestartInterval` | Controls the witness restart interval. | gauge | `name` |
### Network
| Name | Description | Type | Labels |
|-------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|--------|
| `mscluster_network_Characteristics` | Provides the characteristics of the network. The cluster defines characteristics only for resources. For a description of these characteristics, see [CLUSCTL_RESOURCE_GET_CHARACTERISTICS](https://msdn.microsoft.com/library/aa367466). | gauge | `name` |
| `mscluster_network_Flags` | Provides access to the flags set for the network. The cluster defines flags only for resources. For a description of these flags, see [CLUSCTL_RESOURCE_GET_FLAGS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-flags). | gauge | `name` |
| `mscluster_network_Metric` | The metric of a cluster network (networks with lower values are used first). If this value is set, then the AutoMetric property is set to false. | gauge | `name` |
| `mscluster_network_Role` | Provides access to the network's Role property. The Role property describes the role of the network in the cluster. 0: None; 1: Cluster; 2: Client; 3: Both | gauge | `name` |
| `mscluster_network_State` | Provides the current state of the network. -1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up | gauge | `name` |
### Node
| Name | Description | Type | Labels |
|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|--------|
| `mscluster_node_BuildNumber` | Provides access to the node's BuildNumber property. | gauge | `name` |
| `mscluster_node_Characteristics` | Provides access to the characteristics set for the node. For a list of possible characteristics, see [CLUSCTL_NODE_GET_CHARACTERISTICS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-node-get-characteristics). | gauge | `name` |
| `mscluster_node_DetectedCloudPlatform` | The cloud platform, if any, detected for the node. | gauge | `name` |
| `mscluster_node_DynamicWeight` | The dynamic vote weight of the node adjusted by dynamic quorum feature. | gauge | `name` |
| `mscluster_node_Flags` | Provides access to the flags set for the node. For a list of possible characteristics, see [CLUSCTL_NODE_GET_FLAGS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-node-get-flags). | gauge | `name` |
| `mscluster_node_MajorVersion` | Provides access to the node's MajorVersion property, which specifies the major portion of the Windows version installed. | gauge | `name` |
| `mscluster_node_MinorVersion` | Provides access to the node's MinorVersion property, which specifies the minor portion of the Windows version installed. | gauge | `name` |
| `mscluster_node_NeedsPreventQuorum` | Whether the cluster service on that node should be started with prevent quorum flag. | gauge | `name` |
| `mscluster_node_NodeDrainStatus` | The current node drain status of a node. 0: Not Initiated; 1: In Progress; 2: Completed; 3: Failed | gauge | `name` |
| `mscluster_node_NodeHighestVersion` | Provides access to the node's NodeHighestVersion property, which specifies the highest possible version of the cluster service with which the node can join or communicate. | gauge | `name` |
| `mscluster_node_NodeLowestVersion` | Provides access to the node's NodeLowestVersion property, which specifies the lowest possible version of the cluster service with which the node can join or communicate. | gauge | `name` |
| `mscluster_node_NodeWeight` | The vote weight of the node. | gauge | `name` |
| `mscluster_node_State` | Returns the current state of a node. -1: Unknown; 0: Up; 1: Down; 2: Paused; 3: Joining | gauge | `name` |
| `mscluster_node_StatusInformation` | The isolation or quarantine status of the node. | gauge | `name` |
### Resource
| Name | Description | Type | Labels |
|---------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|--------------------------------------------|
| `mscluster_resource_Characteristics` | Provides the characteristics of the object. The cluster defines characteristics only for resources. For a description of these characteristics, see [CLUSCTL_RESOURCE_GET_CHARACTERISTICS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-characteristics). | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_DeadlockTimeout` | Indicates the length of time to wait, in milliseconds, before declaring a deadlock in any call into a resource. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_EmbeddedFailureAction` | The time, in milliseconds, that a resource should remain in a failed state before the Cluster service attempts to restart it. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_Flags` | Provides access to the flags set for the object. The cluster defines flags only for resources. For a description of these flags, see [CLUSCTL_RESOURCE_GET_FLAGS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-flags). | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_IsAlivePollInterval` | Provides access to the resource's IsAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it is operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the IsAlivePollInterval property for the resource type associated with the resource. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_LooksAlivePollInterval` | Provides access to the resource's LooksAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it appears operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the LooksAlivePollInterval property for the resource type associated with the resource. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_MonitorProcessId` | Provides the process ID of the resource host service that is currently hosting the resource. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_OwnerNode` | The node hosting the resource. | gauge | `type`, `owner_group`, `node_name`, `name` |
| `mscluster_resource_PendingTimeout` | Provides access to the resource's PendingTimeout property. If a resource cannot be brought online or taken offline in the number of milliseconds specified by the PendingTimeout property, the resource is forcibly terminated. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_ResourceClass` | Gets or sets the resource class of a resource. 0: Unknown; 1: Storage; 2: Network; 32768: Unknown | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_RestartAction` | Provides access to the resource's RestartAction property, which is the action to be taken by the Cluster Service if the resource fails. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_RestartDelay` | Indicates the time delay before a failed resource is restarted. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_RestartPeriod` | Provides access to the resource's RestartPeriod property, which is interval of time, in milliseconds, during which a specified number of restart attempts can be made on a nonresponsive resource. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_RestartThreshold` | Provides access to the resource's RestartThreshold property which is the maximum number of restart attempts that can be made on a resource within an interval defined by the RestartPeriod property before the Cluster Service initiates the action specified by the RestartAction property. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_RetryPeriodOnFailure` | Provides access to the resource's RetryPeriodOnFailure property, which is the interval of time (in milliseconds) that a resource should remain in a failed state before the Cluster service attempts to restart it. | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_State` | The current state of the resource. -1: Unknown; 0: Inherited; 1: Initializing; 2: Online; 3: Offline; 4: Failed; 128: Pending; 129: Online Pending; 130: Offline Pending | gauge | `type`, `owner_group`, `name` |
| `mscluster_resource_Subclass` | Provides the list of references to nodes that can be the owner of this resource. | gauge | `type`, `owner_group`, `name` |
### ResourceGroup
| Name | Description | Type | Labels |
|-----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|---------------------|
| `mscluster_resourcegroup_AutoFailbackType` | Provides access to the group's AutoFailbackType property. | gauge | `name` |
| `mscluster_resourcegroup_Characteristics` | Provides the characteristics of the group. The cluster defines characteristics only for resources. For a description of these characteristics, see [CLUSCTL_RESOURCE_GET_CHARACTERISTICS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-characteristics). | gauge | `name` |
| `mscluster_resourcegroup_ColdStartSetting` | Indicates whether a group can start after a cluster cold start. | gauge | `name` |
| `mscluster_resourcegroup_DefaultOwner` | Number of the last node the resource group was activated on or explicitly moved to. | gauge | `name` |
| `mscluster_resourcegroup_FailbackWindowEnd` | The FailbackWindowEnd property provides the latest time that the group can be moved back to the node identified as its preferred node. | gauge | `name` |
| `mscluster_resourcegroup_FailbackWindowStart` | The FailbackWindowStart property provides the earliest time (that is, local time as kept by the cluster) that the group can be moved back to the node identified as its preferred node. | gauge | `name` |
| `mscluster_resourcegroup_FailoverPeriod` | The FailoverPeriod property specifies a number of hours during which a maximum number of failover attempts, specified by the FailoverThreshold property, can occur. | gauge | `name` |
| `mscluster_resourcegroup_FailoverThreshold` | The FailoverThreshold property specifies the maximum number of failover attempts. | gauge | `name` |
| `mscluster_resourcegroup_Flags` | Provides access to the flags set for the group. The cluster defines flags only for resources. For a description of these flags, see [CLUSCTL_RESOURCE_GET_FLAGS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-flags). | gauge | `name` |
| `mscluster_resourcegroup_GroupType` | The Type of the resource group. | gauge | `name` |
| `mscluster_resourcegroup_OwnerNode` | The node hosting the resource group. | gauge | `node_name`, `name` |
| `mscluster_resourcegroup_Priority` | Priority value of the resource group | gauge | `name` |
| `mscluster_resourcegroup_ResiliencyPeriod` | The resiliency period for this group, in seconds. | gauge | `name` |
| `mscluster_resourcegroup_State` | The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending | gauge | `name` |
| `mscluster_resourcegroup_UpdateDomain` | | gauge | `name` |
### Example metric
List all cluster resources owned by node1
```
windows_mscluster_resource_owner_node{node_name="node1"}
```
## Useful queries
Counts the number of Network Name cluster resources
```
count(windows_mscluster_resource_state{type="Network Name"})
```
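A related query, using the node state codes documented above (-1 Unknown, 0 Up, 1 Down, 2 Paused, 3 Joining) and the same `windows_` prefix convention as the examples above, lists any node that is not currently up:

```
windows_mscluster_node_state != 0
```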
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -1,104 +0,0 @@
# mscluster_cluster collector
The MSCluster_Cluster class is a dynamic WMI class that represents a cluster.
|||
-|-
Metric name prefix | `mscluster_cluster`
Classes | `MSCluster_Cluster`
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`AddEvictDelay` | Provides access to the cluster's AddEvictDelay property, which is the number a seconds that a new node is delayed after an eviction of another node. | gauge | `name`
`AdminAccessPoint` | The type of the cluster administrative access point. | gauge | `name`
`AutoAssignNodeSite` | Determines whether or not the cluster will attempt to automatically assign nodes to sites based on networks and Active Directory Site information. | gauge | `name`
`AutoBalancerLevel` | Determines the level of aggressiveness of AutoBalancer. | gauge | `name`
`AutoBalancerMode` | Determines whether or not the auto balancer is enabled. | gauge | `name`
`BackupInProgress` | Indicates whether a backup is in progress. | gauge | `name`
`BlockCacheSize` | CSV BlockCache Size in MB. | gauge | `name`
`ClusSvcHangTimeout` | Controls how long the cluster network driver waits between Failover Cluster Service heartbeats before it determines that the Failover Cluster Service has stopped responding. | gauge | `name`
`ClusSvcRegroupOpeningTimeout` | Controls how long a node will wait on other nodes in the opening stage before deciding that they failed. | gauge | `name`
`ClusSvcRegroupPruningTimeout` | Controls how long the membership leader will wait to reach full connectivity between cluster nodes. | gauge | `name`
`ClusSvcRegroupStageTimeout` | Controls how long a node will wait on other nodes in a membership stage before deciding that they failed. | gauge | `name`
`ClusSvcRegroupTickInMilliseconds` | Controls how frequently the membership algorithm is sending periodic membership messages. | gauge | `name`
`ClusterEnforcedAntiAffinity` | Enables or disables hard enforcement of group anti-affinity classes. | gauge | `name`
`ClusterFunctionalLevel` | The functional level the cluster is currently running in. | gauge | `name`
`ClusterGroupWaitDelay` | Maximum time in seconds that a group waits for its preferred node to come online during cluster startup before coming online on a different node. | gauge | `name`
`ClusterLogLevel` | Controls the level of cluster logging. | gauge | `name`
`ClusterLogSize` | Controls the maximum size of the cluster log files on each of the nodes. | gauge | `name`
`ClusterUpgradeVersion` | Specifies the upgrade version the cluster is currently running in. | gauge | `name`
`CrossSiteDelay` | Controls how long the cluster network driver waits in milliseconds between sending Cluster Service heartbeats across sites. | gauge | `name`
`CrossSiteThreshold` | Controls how many Cluster Service heartbeats can be missed across sites before it determines that Cluster Service has stopped responding. | gauge | `name`
`CrossSubnetDelay` | Controls how long the cluster network driver waits in milliseconds between sending Cluster Service heartbeats across subnets. | gauge | `name`
`CrossSubnetThreshold` | Controls how many Cluster Service heartbeats can be missed across subnets before it determines that Cluster Service has stopped responding. | gauge | `name`
`CsvBalancer` | Whether automatic balancing for CSV is enabled. | gauge | `name`
`DatabaseReadWriteMode` | Sets the database read and write mode. | gauge | `name`
`DefaultNetworkRole` | Provides access to the cluster's DefaultNetworkRole property. | gauge | `name`
`DetectedCloudPlatform` | | gauge | `name`
`DetectManagedEvents` | | gauge | `name`
`DetectManagedEventsThreshold` | | gauge | `name`
`DisableGroupPreferredOwnerRandomization` | | gauge | `name`
`DrainOnShutdown` | Whether to drain the node when cluster service is being stopped. | gauge | `name`
`DynamicQuorumEnabled` | Allows cluster service to adjust node weights as needed to increase availability. | gauge | `name`
`EnableSharedVolumes` | Enables or disables cluster shared volumes on this cluster. | gauge | `name`
`FixQuorum` | Provides access to the cluster's FixQuorum property, which specifies if the cluster is in a fix quorum state. | gauge | `name`
`GracePeriodEnabled` | Whether the node grace period feature of this cluster is enabled. | gauge | `name`
`GracePeriodTimeout` | The grace period timeout in milliseconds. | gauge | `name`
`GroupDependencyTimeout` | The timeout after which a group will be brought online despite unsatisfied dependencies | gauge | `name`
`HangRecoveryAction` | Controls the action to take if the user-mode processes have stopped responding. | gauge | `name`
`IgnorePersistentStateOnStartup` | Provides access to the cluster's IgnorePersistentStateOnStartup property, which specifies whether the cluster will bring online groups that were online when the cluster was shut down. | gauge | `name`
`LogResourceControls` | Controls the logging of resource controls. | gauge | `name`
`LowerQuorumPriorityNodeId` | Specifies the Node ID that has a lower priority when voting for quorum is performed. If the quorum vote is split 50/50%, the specified node's vote would be ignored to break the tie. If this is not set then the cluster will pick a node at random to break the tie. | gauge | `name`
`MaxNumberOfNodes` | Indicates the maximum number of nodes that may participate in the Cluster. | gauge | `name`
`MessageBufferLength` | The maximum unacknowledged message count for GEM. | gauge | `name`
`MinimumNeverPreemptPriority` | Groups with this priority or higher cannot be preempted. | gauge | `name`
`MinimumPreemptorPriority` | Minimum priority a cluster group must have to be able to preempt another group. | gauge | `name`
`NetftIPSecEnabled` | Whether IPSec is enabled for cluster internal traffic. | gauge | `name`
`PlacementOptions` | Various option flags to modify default placement behavior. | gauge | `name`
`PlumbAllCrossSubnetRoutes` | Plumbs all possible cross subnet routes to all nodes. | gauge | `name`
`PreventQuorum` | Whether the cluster will ignore group persistent state on startup. | gauge | `name`
`QuarantineDuration` | The quarantine period timeout in milliseconds. | gauge | `name`
`QuarantineThreshold` | Number of node failures before it will be quarantined. | gauge | `name`
`QuorumArbitrationTimeMax` | Controls the maximum time necessary to decide the Quorum owner node. | gauge | `name`
`QuorumArbitrationTimeMin` | Controls the minimum time necessary to decide the Quorum owner node. | gauge | `name`
`QuorumLogFileSize` | This property is obsolete. | gauge | `name`
`QuorumTypeValue` | Get the current quorum type value. -1: Unknown; 1: Node; 2: FileShareWitness; 3: Storage; 4: None | gauge | `name`
`RequestReplyTimeout` | Controls the request reply time-out period. | gauge | `name`
`ResiliencyDefaultPeriod` | The default resiliency period, in seconds, for the cluster. | gauge | `name`
`ResiliencyLevel` | The resiliency level for the cluster. | gauge | `name`
`ResourceDllDeadlockPeriod` | This property is obsolete. | gauge | `name`
`RootMemoryReserved` | Controls the amount of memory reserved for the parent partition on all cluster nodes. | gauge | `name`
`RouteHistoryLength` | The history length for routes to help finding network issues. | gauge | `name`
`S2DBusTypes` | Bus types for storage spaces direct. | gauge | `name`
`S2DCacheDesiredState` | Desired state of the storage spaces direct cache. | gauge | `name`
`S2DCacheFlashReservePercent` | Percentage of allocated flash space to utilize when caching. | gauge | `name`
`S2DCachePageSizeKBytes` | Page size in KB used by S2D cache. | gauge | `name`
`S2DEnabled` | Whether direct attached storage (DAS) is enabled. | gauge | `name`
`S2DIOLatencyThreshold` | The I/O latency threshold for storage spaces direct. | gauge | `name`
`S2DOptimizations` | Optimization flags for storage spaces direct. | gauge | `name`
`SameSubnetDelay` | Controls how long the cluster network driver waits in milliseconds between sending Cluster Service heartbeats on the same subnet. | gauge | `name`
`SameSubnetThreshold` | Controls how many Cluster Service heartbeats can be missed on the same subnet before it determines that Cluster Service has stopped responding. | gauge | `name`
`SecurityLevel` | Controls the level of security that should apply to intracluster messages. 0: Clear Text; 1: Sign; 2: Encrypt | gauge | `name`
`SecurityLevelForStorage` | | gauge | `name`
`SharedVolumeVssWriterOperationTimeout` | CSV VSS Writer operation timeout in seconds. | gauge | `name`
`ShutdownTimeoutInMinutes` | The maximum time in minutes allowed for cluster resources to come offline during cluster service shutdown. | gauge | `name`
`UseClientAccessNetworksForSharedVolumes` | Whether the use of client access networks for cluster shared volumes feature of this cluster is enabled. 0: Disabled; 1: Enabled; 2: Auto | gauge | `name`
`WitnessDatabaseWriteTimeout` | Controls the maximum time in seconds that a cluster database write to a witness can take before the write is abandoned. | gauge | `name`
`WitnessDynamicWeight` | The weight of the configured witness. | gauge | `name`
`WitnessRestartInterval` | Controls the witness restart interval. | gauge | `name`
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -1,32 +0,0 @@
# mscluster_network collector
The MSCluster_Network class is a dynamic WMI class that represents cluster networks.
|||
-|-
Metric name prefix | `mscluster_network`
Classes | `MSCluster_Network`
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`Characteristics` | Provides the characteristics of the network. The cluster defines characteristics only for resources. For a description of these characteristics, see [CLUSCTL_RESOURCE_GET_CHARACTERISTICS](https://msdn.microsoft.com/library/aa367466). | gauge | `name`
`Flags` | Provides access to the flags set for the network. The cluster defines flags only for resources. For a description of these flags, see [CLUSCTL_RESOURCE_GET_FLAGS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-flags). | gauge | `name`
`Metric` | The metric of a cluster network (networks with lower values are used first). If this value is set, then the AutoMetric property is set to false. | gauge | `name`
`Role` | Provides access to the network's Role property. The Role property describes the role of the network in the cluster. 0: None; 1: Cluster; 2: Client; 3: Both | gauge | `name`
`State` | Provides the current state of the network. 1-1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up | gauge | `name`
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -1,41 +0,0 @@
# mscluster_node collector
The MSCluster_Node class is a dynamic WMI class that represents a cluster node.
|||
-|-
Metric name prefix | `mscluster_node`
Classes | `MSCluster_Node`
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`BuildNumber` | Provides access to the node's BuildNumber property. | gauge | `name`
`Characteristics` | Provides access to the characteristics set for the node. For a list of possible characteristics, see [CLUSCTL_NODE_GET_CHARACTERISTICS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-node-get-characteristics). | gauge | `name`
`DetectedCloudPlatform` | The dynamic vote weight of the node adjusted by dynamic quorum feature. | gauge | `name`
`DynamicWeight` | The dynamic vote weight of the node adjusted by dynamic quorum feature. | gauge | `name`
`Flags` | Provides access to the flags set for the node. For a list of possible characteristics, see [CLUSCTL_NODE_GET_FLAGS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-node-get-flags). | gauge | `name`
`MajorVersion` | Provides access to the node's MajorVersion property, which specifies the major portion of the Windows version installed. | gauge | `name`
`MinorVersion` | Provides access to the node's MinorVersion property, which specifies the minor portion of the Windows version installed. | gauge | `name`
`NeedsPreventQuorum` | Whether the cluster service on that node should be started with prevent quorum flag. | gauge | `name`
`NodeDrainStatus` | The current node drain status of a node. 0: Not Initiated; 1: In Progress; 2: Completed; 3: Failed | gauge | `name`
`NodeHighestVersion` | Provides access to the node's NodeHighestVersion property, which specifies the highest possible version of the cluster service with which the node can join or communicate. | gauge | `name`
`NodeLowestVersion` | Provides access to the node's NodeLowestVersion property, which specifies the lowest possible version of the cluster service with which the node can join or communicate. | gauge | `name`
`NodeWeight` | The vote weight of the node. | gauge | `name`
`State` | Returns the current state of a node. -1: Unknown; 0: Up; 1: Down; 2: Paused; 3: Joining | gauge | `name`
`StatusInformation` | The isolation or quarantine status of the node. | gauge | `name`
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -1,50 +0,0 @@
# mscluster_resource collector
The MSCluster_resource class is a dynamic WMI class that represents a cluster resource.
|||
-|-
Metric name prefix | `mscluster_resource`
Classes | `MSCluster_Resource`
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`Characteristics` | Provides the characteristics of the object. The cluster defines characteristics only for resources. For a description of these characteristics, see [CLUSCTL_RESOURCE_GET_CHARACTERISTICS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-characteristics). | gauge | `type`, `owner_group`, `name`
`DeadlockTimeout` | Indicates the length of time to wait, in milliseconds, before declaring a deadlock in any call into a resource. | gauge | `type`, `owner_group`, `name`
`EmbeddedFailureAction` | The time, in milliseconds, that a resource should remain in a failed state before the Cluster service attempts to restart it. | gauge | `type`, `owner_group`, `name`
`Flags` | Provides access to the flags set for the object. The cluster defines flags only for resources. For a description of these flags, see [CLUSCTL_RESOURCE_GET_FLAGS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-flags). | gauge | `type`, `owner_group`, `name`
`IsAlivePollInterval` | Provides access to the resource's IsAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it is operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the IsAlivePollInterval property for the resource type associated with the resource. | gauge | `type`, `owner_group`, `name`
`LooksAlivePollInterval` | Provides access to the resource's LooksAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it appears operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the LooksAlivePollInterval property for the resource type associated with the resource. | gauge | `type`, `owner_group`, `name`
`MonitorProcessId` | Provides the process ID of the resource host service that is currently hosting the resource. | gauge | `type`, `owner_group`, `name`
`OwnerNode` | The node hosting the resource. | gauge | `type`, `owner_group`, `node_name`, `name`
`PendingTimeout` | Provides access to the resource's PendingTimeout property. If a resource cannot be brought online or taken offline in the number of milliseconds specified by the PendingTimeout property, the resource is forcibly terminated. | gauge | `type`, `owner_group`, `name`
`ResourceClass` | Gets or sets the resource class of a resource. 0: Unknown; 1: Storage; 2: Network; 32768: Unknown | gauge | `type`, `owner_group`, `name`
`RestartAction` | Provides access to the resource's RestartAction property, which is the action to be taken by the Cluster Service if the resource fails. | gauge | `type`, `owner_group`, `name`
`RestartDelay` | Indicates the time delay before a failed resource is restarted. | gauge | `type`, `owner_group`, `name`
`RestartPeriod` | Provides access to the resource's RestartPeriod property, which is interval of time, in milliseconds, during which a specified number of restart attempts can be made on a nonresponsive resource. | gauge | `type`, `owner_group`, `name`
`RestartThreshold` | Provides access to the resource's RestartThreshold property which is the maximum number of restart attempts that can be made on a resource within an interval defined by the RestartPeriod property before the Cluster Service initiates the action specified by the RestartAction property. | gauge | `type`, `owner_group`, `name`
`RetryPeriodOnFailure` | Provides access to the resource's RetryPeriodOnFailure property, which is the interval of time (in milliseconds) that a resource should remain in a failed state before the Cluster service attempts to restart it. | gauge | `type`, `owner_group`, `name`
`State` | The current state of the resource. -1: Unknown; 0: Inherited; 1: Initializing; 2: Online; 3: Offline; 4: Failed; 128: Pending; 129: Online Pending; 130: Offline Pending | gauge | `type`, `owner_group`, `name`
`Subclass` | Provides the list of references to nodes that can be the owner of this resource. | gauge | `type`, `owner_group`, `name`
### Example metric
Query the state of all cluster resource owned by node1
```
windows_mscluster_resource_owner_node{node_name="node1"}
```
## Useful queries
Counts the number of Network Name cluster resource
```
count(windows_mscluster_resource_state{type="Network Name"})
```
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -1,48 +0,0 @@
# mscluster_resourcegroup collector
The MSCluster_ResourceGroup class is a dynamic WMI class that represents a cluster group.
|||
-|-
Metric name prefix | `mscluster_resourcegroup`
Classes | `MSCluster_ResourceGroup`
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`AutoFailbackType` | Provides access to the group's AutoFailbackType property. | gauge | `name`
`Characteristics` | Provides the characteristics of the group. The cluster defines characteristics only for resources. For a description of these characteristics, see [CLUSCTL_RESOURCE_GET_CHARACTERISTICS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-characteristics). | gauge | `name`
`ColdStartSetting` | Indicates whether a group can start after a cluster cold start. | gauge | `name`
`DefaultOwner` | Number of the last node the resource group was activated on or explicitly moved to. | gauge | `name`
`FailbackWindowEnd` | The FailbackWindowEnd property provides the latest time that the group can be moved back to the node identified as its preferred node. | gauge | `name`
`FailbackWindowStart` | The FailbackWindowStart property provides the earliest time (that is, local time as kept by the cluster) that the group can be moved back to the node identified as its preferred node. | gauge | `name`
`FailoverPeriod` | The FailoverPeriod property specifies a number of hours during which a maximum number of failover attempts, specified by the FailoverThreshold property, can occur. | gauge | `name`
`FailoverThreshold` | The FailoverThreshold property specifies the maximum number of failover attempts. | gauge | `name`
`Flags` | Provides access to the flags set for the group. The cluster defines flags only for resources. For a description of these flags, see [CLUSCTL_RESOURCE_GET_FLAGS](https://docs.microsoft.com/en-us/previous-versions/windows/desktop/mscs/clusctl-resource-get-flags). | gauge | `name`
`GroupType` | The Type of the resource group. | gauge | `name`
`OwnerNode` | The node hosting the resource group. | gauge | `node_name`, `name`
`Priority` | Priority value of the resource group | gauge | `name`
`ResiliencyPeriod` | The resiliency period for this group, in seconds. | gauge | `name`
`State` | The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending | gauge | `name`
`UpdateDomain` | | gauge | `name`
### Example metric
Query all cluster groups owned by node1
```
windows_mscluster_resourcegroup_owner_node{node_name="node1"}
```
## Useful queries
Counts the number of cluster groups by type
```
count_values("count", windows_mscluster_resourcegroup_group_type)
```
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
@@ -37,11 +37,6 @@ Enables IIS process name queries. IIS process names are combined with their app
Disabled by default, and can be enabled with `--collector.process.iis=true`. Disabled by default, and can be enabled with `--collector.process.iis=true`.
### `--collector.process.report-owner`
Enables reporting of the process owner. This is a potentially expensive operation.
Disabled by default, and can be enabled with `--collector.process.report-owner`.
### Example ### Example
To match all firefox processes: `--collector.process.include="firefox.*"`. To match all firefox processes: `--collector.process.include="firefox.*"`.
@@ -74,23 +69,24 @@ w3wp_Test
## Metrics ## Metrics
| Name | Description | Type | Labels | | Name | Description | Type | Labels |
|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-----------------------------------------------------------------| |---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|---------------------------------------------------------------------------------------|
| `windows_process_start_time` | Time of process start | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_info` | A metric with a constant '1' value labeled with process information | gauge | `process`, `process_id`, `creating_process_id`, `process_group_id`,`owner`, `cmdline` |
| `windows_process_cpu_time_total` | Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user). An instruction is the basic unit of execution in a computer, a thread is the object that executes instructions, and a process is the object created when a program is run. Code executed to handle some hardware interrupts and trap conditions is included in this count. | counter | `process`, `process_id`, `creating_process_id`, `owner`, `mode` | | `windows_process_start_time` | Time of process start | gauge | `process`, `process_id` |
| `windows_process_handles` | Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process. | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_cpu_time_total` | Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user). An instruction is the basic unit of execution in a computer, a thread is the object that executes instructions, and a process is the object created when a program is run. Code executed to handle some hardware interrupts and trap conditions is included in this count. | counter | `process`, `process_id`, `mode` |
| `windows_process_io_bytes_total` | Bytes issued to I/O operations in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. | counter | `process`, `process_id`, `creating_process_id`, `owner`, `mode` | | `windows_process_handles` | Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process. | gauge | `process`, `process_id` |
| `windows_process_io_operations_total` | I/O operations issued in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. | counter | `process`, `process_id`, `creating_process_id`, `owner`, `mode` | | `windows_process_io_bytes_total` | Bytes issued to I/O operations in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. | counter | `process`, `process_id`, `mode` |
| `windows_process_page_faults_total` | Page faults by the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. This can cause the page not to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with which the page is shared. | counter | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_io_operations_total` | I/O operations issued in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. | counter | `process`, `process_id`, `mode` |
| `windows_process_page_file_bytes` | Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_page_faults_total` | Page faults by the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. This can cause the page not to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with which the page is shared. | counter | `process`, `process_id` |
| `windows_process_pool_bytes` | Pool Bytes is the last observed number of bytes in the paged or nonpaged pool. The nonpaged pool is an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. The paged pool is an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. Nonpaged pool bytes is calculated differently than paged pool bytes, so it might not equal the total of paged pool bytes. | gauge | `process`, `process_id`, `creating_process_id`, `owner`, `pool` | | `windows_process_page_file_bytes` | Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory. | gauge | `process`, `process_id` |
| `windows_process_priority_base` | Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process. | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_pool_bytes` | Pool Bytes is the last observed number of bytes in the paged or nonpaged pool. The nonpaged pool is an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. The paged pool is an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. Nonpaged pool bytes is calculated differently than paged pool bytes, so it might not equal the total of paged pool bytes. | gauge | `process`, `process_id`, `pool` |
| `windows_process_private_bytes` | Current number of bytes this process has allocated that cannot be shared with other processes. | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_priority_base` | Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process. | gauge | `process`, `process_id` |
| `windows_process_threads` | Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread. | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_private_bytes` | Current number of bytes this process has allocated that cannot be shared with other processes. | gauge | `process`, `process_id` |
| `windows_process_virtual_bytes` | Current size, in bytes, of the virtual address space that the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries. | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_threads` | Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread. | gauge | `process`, `process_id` |
| `windows_process_working_set_private_bytes` | Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes. | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_virtual_bytes` | Current size, in bytes, of the virtual address space that the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries. | gauge | `process`, `process_id` |
| `windows_process_working_set_peak_bytes` | Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the Working Set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from Working Sets. If they are needed they will then be soft-faulted back into the Working Set before they leave main memory. | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_working_set_private_bytes` | Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes. | gauge | `process`, `process_id` |
| `windows_process_working_set_bytes` | Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `process`, `process_id`, `creating_process_id`, `owner` | | `windows_process_working_set_peak_bytes` | Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the Working Set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from Working Sets. If they are needed they will then be soft-faulted back into the Working Set before they leave main memory. | gauge | `process`, `process_id` |
| `windows_process_working_set_bytes` | Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory. | gauge | `process`, `process_id` |
### Example metric ### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_ _This collector does not yet have explained examples, we would appreciate your help adding them!_
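The table above suggests one way to use the new `windows_process_info` metric; a hypothetical query (not from the upstream docs) that attaches the `owner` label to per-process CPU usage:
```
sum by (process, owner) (
  rate(windows_process_cpu_time_total[5m])
  * on (process, process_id) group_left (owner)
  windows_process_info
)
```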
@@ -19,7 +19,6 @@ None
| Name | Description | Type | Labels | | Name | Description | Type | Labels |
|------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-----------------| |------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-----------------|
| `windows_terminal_services_session_info` | Info about active WTS sessions | gauge | host,user,state | | `windows_terminal_services_session_info` | Info about active WTS sessions | gauge | host,user,state |
| `windows_terminal_services_local_session_count` | Number of local Terminal Services sessions. | gauge | `session` |
| `windows_terminal_services_connection_broker_performance_total`* | The total number of connections handled by the Connection Brokers since the service started. | counter | `connection` | | `windows_terminal_services_connection_broker_performance_total`* | The total number of connections handled by the Connection Brokers since the service started. | counter | `connection` |
| `windows_terminal_services_handles` | Total number of handles currently opened by this process. This number is the sum of the handles currently opened by each thread in this process. | gauge | `session_name` | | `windows_terminal_services_handles` | Total number of handles currently opened by this process. This number is the sum of the handles currently opened by each thread in this process. | gauge | `session_name` |
| `windows_terminal_services_page_fault_total` | Rate at which page faults occur in the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. The page may not be retrieved from disk if it is on the standby list and therefore already in main memory. The page also may not be retrieved if it is in use by another process which shares the page. | counter | `session_name` | | `windows_terminal_services_page_fault_total` | Rate at which page faults occur in the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. The page may not be retrieved from disk if it is on the standby list and therefore already in main memory. The page also may not be retrieved if it is in use by another process which shares the page. | counter | `session_name` |
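With `windows_terminal_services_local_session_count` removed, a hypothetical replacement (assuming each session's info series reports a value of 1 for its current state) is to derive the session count per state from the info metric:
```
sum by (state) (windows_terminal_services_session_info)
```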
@@ -12,6 +12,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"net/http" "net/http"
"net/http/pprof" "net/http/pprof"
@@ -31,7 +32,6 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/log/flag" "github.com/prometheus-community/windows_exporter/pkg/log/flag"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
"github.com/prometheus/exporter-toolkit/web" "github.com/prometheus/exporter-toolkit/web"
webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag" webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
@@ -87,7 +87,7 @@ func main() {
"config.file", "config.file",
"YAML configuration file to use. Values set in this file will be overridden by CLI flags.", "YAML configuration file to use. Values set in this file will be overridden by CLI flags.",
).String() ).String()
insecure_skip_verify = app.Flag( insecureSkipVerify = app.Flag(
"config.file.insecure-skip-verify", "config.file.insecure-skip-verify",
"Skip TLS verification in loading YAML configuration.", "Skip TLS verification in loading YAML configuration.",
).Default("false").Bool() ).Default("false").Bool()
@@ -147,7 +147,7 @@ func main() {
_ = level.Debug(logger).Log("msg", "Logging has Started") _ = level.Debug(logger).Log("msg", "Logging has Started")
if *configFile != "" { if *configFile != "" {
resolver, err := config.NewResolver(*configFile, logger, *insecure_skip_verify) resolver, err := config.NewResolver(*configFile, logger, *insecureSkipVerify)
if err != nil { if err != nil {
_ = level.Error(logger).Log("msg", "could not load config file", "err", err) _ = level.Error(logger).Log("msg", "could not load config file", "err", err)
os.Exit(1) os.Exit(1)
@@ -195,22 +195,16 @@ func main() {
} }
} }
if err = wmi.InitWbem(logger); err != nil {
_ = level.Error(logger).Log("err", err)
os.Exit(1)
}
enabledCollectorList := utils.ExpandEnabledCollectors(*enabledCollectors) enabledCollectorList := utils.ExpandEnabledCollectors(*enabledCollectors)
collectors.Enable(enabledCollectorList) collectors.Enable(enabledCollectorList)
collectors.SetLogger(logger)
// Initialize collectors before loading // Initialize collectors before loading
err = collectors.Build() err = collectors.Build(logger)
if err != nil { if err != nil {
_ = level.Error(logger).Log("msg", "Couldn't load collectors", "err", err) _ = level.Error(logger).Log("msg", "Couldn't load collectors", "err", err)
os.Exit(1) os.Exit(1)
} }
err = collectors.SetPerfCounterQuery() err = collectors.SetPerfCounterQuery(logger)
if err != nil { if err != nil {
_ = level.Error(logger).Log("msg", "Couldn't set performance counter query", "err", err) _ = level.Error(logger).Log("msg", "Couldn't set performance counter query", "err", err)
os.Exit(1) os.Exit(1)
@@ -229,7 +223,7 @@ func main() {
_ = level.Info(logger).Log("msg", fmt.Sprintf("Enabled collectors: %v", strings.Join(enabledCollectorList, ", "))) _ = level.Info(logger).Log("msg", fmt.Sprintf("Enabled collectors: %v", strings.Join(enabledCollectorList, ", ")))
mux := http.NewServeMux() mux := http.NewServeMux()
mux.HandleFunc(*metricsPath, withConcurrencyLimit(*maxRequests, collectors.BuildServeHTTP(*disableExporterMetrics, *timeoutMargin))) mux.HandleFunc(*metricsPath, withConcurrencyLimit(*maxRequests, collectors.BuildServeHTTP(logger, *disableExporterMetrics, *timeoutMargin)))
mux.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) { mux.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
_, err := fmt.Fprintln(w, `{"status":"ok"}`) _, err := fmt.Fprintln(w, `{"status":"ok"}`)
@@ -274,7 +268,7 @@ func main() {
} }
go func() { go func() {
if err := web.ListenAndServe(server, webConfig, logger); err != nil { if err := web.ListenAndServe(server, webConfig, logger); err != nil && !errors.Is(err, http.ErrServerClosed) {
_ = level.Error(logger).Log("msg", "cannot start windows_exporter", "err", err) _ = level.Error(logger).Log("msg", "cannot start windows_exporter", "err", err)
os.Exit(1) os.Exit(1)
} }
go.mod
@@ -3,12 +3,12 @@ module github.com/prometheus-community/windows_exporter
go 1.22 go 1.22
require ( require (
github.com/Microsoft/hcsshim v0.12.5 github.com/Microsoft/hcsshim v0.12.6
github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/kingpin/v2 v2.4.0
github.com/dimchansky/utfbom v1.1.1 github.com/dimchansky/utfbom v1.1.1
github.com/go-kit/log v0.2.1 github.com/go-kit/log v0.2.1
github.com/go-ole/go-ole v1.3.0 github.com/go-ole/go-ole v1.3.0
github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_golang v1.20.2
github.com/prometheus/client_model v0.6.1 github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.55.0 github.com/prometheus/common v0.55.0
github.com/prometheus/exporter-toolkit v0.11.0 github.com/prometheus/exporter-toolkit v0.11.0
@@ -21,33 +21,36 @@ require (
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
) )
require github.com/pkg/errors v0.9.1 require (
github.com/google/uuid v1.6.0
github.com/pkg/errors v0.9.1
)
require ( require (
github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/cgroups/v3 v3.0.2 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/errdefs v0.1.0 // indirect github.com/containerd/errdefs v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/jpillora/backoff v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
golang.org/x/crypto v0.24.0 // indirect golang.org/x/crypto v0.25.0 // indirect
golang.org/x/net v0.26.0 // indirect golang.org/x/net v0.27.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sync v0.7.0 // indirect golang.org/x/sync v0.7.0 // indirect
golang.org/x/text v0.16.0 // indirect golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/grpc v1.62.0 // indirect google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
) )
go.sum
@@ -2,8 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0= github.com/Microsoft/hcsshim v0.12.6 h1:qEnZjoHXv+4/s0LmKZWE0/AiZmMWEIkFfWBSf1a0wlU=
github.com/Microsoft/hcsshim v0.12.5/go.mod h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8= github.com/Microsoft/hcsshim v0.12.6/go.mod h1:ZABCLVcvLMjIkzr9rUGcQ1QA0p0P3Ps+d3N1g2DsFfk=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
@@ -11,12 +11,12 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8V
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -52,8 +52,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -63,12 +61,18 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
@@ -77,8 +81,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
@@ -110,8 +114,8 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
@@ -124,8 +128,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
@@ -158,15 +162,15 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -9,8 +9,8 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "ad" const Name = "ad"
@@ -21,8 +21,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DirectoryServices_DirectoryServices metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DirectoryServices_DirectoryServices metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
addressBookClientSessions *prometheus.Desc addressBookClientSessions *prometheus.Desc
addressBookOperationsTotal *prometheus.Desc addressBookOperationsTotal *prometheus.Desc
@@ -88,7 +88,7 @@ type Collector struct {
tombstonesObjectsVisitedTotal *prometheus.Desc tombstonesObjectsVisitedTotal *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -97,8 +97,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -110,11 +108,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -122,7 +116,13 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.addressBookOperationsTotal = prometheus.NewDesc( c.addressBookOperationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"), prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"),
"", "",
@@ -502,9 +502,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting ad metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting ad metrics", "err", err)
return err return err
} }
return nil return nil
@@ -664,8 +665,7 @@ type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_DirectoryServices_DirectoryServices var dst []Win32_PerfRawData_DirectoryServices_DirectoryServices
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_DirectoryServices_DirectoryServices", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {
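Taken together, these hunks show the new collector contract: the logger is passed per call instead of being stored on the struct, and WMI-backed collectors receive a shared `*wmi.Client` in `Build`. A minimal sketch of that shape, assuming only the imports shown in the hunks above; the `example` collector name, the metric, and the WMI class projection are made up for illustration and are not part of the repository:
```
package example

import (
	"errors"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
)

const Name = "example" // hypothetical collector name, for illustration only

// Collector stores the shared WMI client; it no longer stores a logger.
type Collector struct {
	wmiClient *wmi.Client
	processes *prometheus.Desc
}

// win32PerfRawDataPerfOSSystem is a minimal projection of the
// Win32_PerfRawData_PerfOS_System class (assumed field name).
type win32PerfRawDataPerfOSSystem struct {
	Processes uint32
}

func (c *Collector) GetName() string { return Name }

// GetPerfCounter now takes the logger as an argument instead of a stored one.
func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
	return []string{}, nil
}

func (c *Collector) Close() error { return nil }

// Build receives the shared WMI client created once by the exporter.
func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
	if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
		return errors.New("wmiClient or SWbemServicesClient is nil")
	}
	c.wmiClient = wmiClient

	c.processes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "processes"),
		"Hypothetical example metric: number of processes reported by WMI.",
		nil,
		nil,
	)

	return nil
}

// Collect gets a context-aware logger per scrape and scopes it to the collector.
func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
	logger = log.With(logger, "collector", Name)

	var dst []win32PerfRawDataPerfOSSystem
	if err := c.wmiClient.Query("SELECT Processes FROM Win32_PerfRawData_PerfOS_System", &dst); err != nil {
		_ = level.Error(logger).Log("msg", "failed collecting example metrics", "err", err)
		return err
	}

	if len(dst) == 0 {
		return errors.New("WMI query returned empty result set")
	}

	ch <- prometheus.MustNewConstMetric(c.processes, prometheus.GaugeValue, float64(dst[0].Processes))

	return nil
}
```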
@@ -13,6 +13,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "adcs" const Name = "adcs"
@@ -23,7 +24,6 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
challengeResponseProcessingTime *prometheus.Desc challengeResponseProcessingTime *prometheus.Desc
challengeResponsesPerSecond *prometheus.Desc challengeResponsesPerSecond *prometheus.Desc
@@ -40,7 +40,7 @@ type Collector struct {
signedCertificateTimestampListsPerSecond *prometheus.Desc signedCertificateTimestampListsPerSecond *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -49,8 +49,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -62,11 +60,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Certification Authority"}, nil return []string{"Certification Authority"}, nil
} }
@@ -74,7 +68,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.requestsPerSecond = prometheus.NewDesc( c.requestsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_total"), prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
"Total certificate requests processed", "Total certificate requests processed",
@@ -157,9 +151,10 @@ func (c *Collector) Build() error {
return nil return nil
} }
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collectADCSCounters(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting ADCS metrics", "err", err) if err := c.collectADCSCounters(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting ADCS metrics", "err", err)
return err return err
} }
return nil return nil
@@ -182,12 +177,12 @@ type perflibADCS struct {
SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"` SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"`
} }
func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
dst := make([]perflibADCS, 0) dst := make([]perflibADCS, 0)
if _, ok := ctx.PerfObjects["Certification Authority"]; !ok { if _, ok := ctx.PerfObjects["Certification Authority"]; !ok {
return errors.New("perflib did not contain an entry for Certification Authority") return errors.New("perflib did not contain an entry for Certification Authority")
} }
err := perflib.UnmarshalObject(ctx.PerfObjects["Certification Authority"], &dst, c.logger) err := perflib.UnmarshalObject(ctx.PerfObjects["Certification Authority"], &dst, logger)
if err != nil { if err != nil {
return err return err
} }
@@ -10,6 +10,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "adfs" const Name = "adfs"
@@ -20,7 +21,6 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
adLoginConnectionFailures *prometheus.Desc adLoginConnectionFailures *prometheus.Desc
artifactDBFailures *prometheus.Desc artifactDBFailures *prometheus.Desc
@@ -67,7 +67,7 @@ type Collector struct {
wstrustTokenRequests *prometheus.Desc wstrustTokenRequests *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -76,8 +76,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -89,11 +87,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"AD FS"}, nil return []string{"AD FS"}, nil
} }
@@ -101,7 +95,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.adLoginConnectionFailures = prometheus.NewDesc( c.adLoginConnectionFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"), prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
"Total number of connection failures to an Active Directory domain controller", "Total number of connection failures to an Active Directory domain controller",
@@ -410,9 +404,10 @@ type perflibADFS struct {
FederationMetadataRequests float64 `perflib:"Federation Metadata Requests"` FederationMetadataRequests float64 `perflib:"Federation Metadata Requests"`
} }
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var adfsData []perflibADFS var adfsData []perflibADFS
err := perflib.UnmarshalObject(ctx.PerfObjects["AD FS"], &adfsData, c.logger) err := perflib.UnmarshalObject(ctx.PerfObjects["AD FS"], &adfsData, logger)
if err != nil { if err != nil {
return err return err
} }
@@ -10,6 +10,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "cache" const Name = "cache"
@@ -21,7 +22,6 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for Perflib Cache metrics. // A Collector is a Prometheus Collector for Perflib Cache metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
asyncCopyReadsTotal *prometheus.Desc asyncCopyReadsTotal *prometheus.Desc
asyncDataMapsTotal *prometheus.Desc asyncDataMapsTotal *prometheus.Desc
@@ -54,7 +54,7 @@ type Collector struct {
syncPinReadsTotal *prometheus.Desc syncPinReadsTotal *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -63,8 +63,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -76,11 +74,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Cache"}, nil return []string{"Cache"}, nil
} }
@@ -88,7 +82,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.asyncCopyReadsTotal = prometheus.NewDesc( c.asyncCopyReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"), prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
"(AsyncCopyReadsTotal)", "(AsyncCopyReadsTotal)",
@@ -267,9 +261,10 @@ func (c *Collector) Build() error {
} }
// Collect implements the Collector interface. // Collect implements the Collector interface.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting cache metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting cache metrics", "err", err)
return err return err
} }
@@ -285,20 +280,23 @@ type perflibCache struct {
AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"` AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"`
AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"` AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"`
AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"` AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"`
CopyReadHitsTotal float64 `perflib:"Copy Read Hits/sec"` CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"`
CopyReadsTotal float64 `perflib:"Copy Reads/sec"` CopyReadsTotal float64 `perflib:"Copy Reads/sec"`
DataFlushesTotal float64 `perflib:"Data Flushes/sec"` DataFlushesTotal float64 `perflib:"Data Flushes/sec"`
DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"` DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"`
DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"` DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"`
DataMapsTotal float64 `perflib:"Data Maps/sec"` DataMapsTotal float64 `perflib:"Data Maps/sec"`
DirtyPages float64 `perflib:"Dirty Pages"`
DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"` FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"`
FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"` FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"`
FastReadsTotal float64 `perflib:"Fast Reads/sec"` FastReadsTotal float64 `perflib:"Fast Reads/sec"`
LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"` LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"`
LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"` LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"`
MDLReadHitsTotal float64 `perflib:"MDL Read Hits/sec"` MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"`
MDLReadsTotal float64 `perflib:"MDL Reads/sec"` MDLReadsTotal float64 `perflib:"MDL Reads/sec"`
PinReadHitsTotal float64 `perflib:"Pin Read Hits/sec"` PinReadHitsTotal float64 `perflib:"Pin Read Hits %"`
PinReadsTotal float64 `perflib:"Pin Reads/sec"` PinReadsTotal float64 `perflib:"Pin Reads/sec"`
ReadAheadsTotal float64 `perflib:"Read Aheads/sec"` ReadAheadsTotal float64 `perflib:"Read Aheads/sec"`
SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"` SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"`
@@ -306,14 +304,12 @@ type perflibCache struct {
SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"` SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"`
SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"` SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"`
SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"` SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"`
DirtyPages float64 `perflib:"Dirty Pages"`
DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []perflibCache // Single-instance class, array is required but will have single entry. var dst []perflibCache // Single-instance class, array is required but will have single entry.
if err := perflib.UnmarshalObject(ctx.PerfObjects["Cache"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["Cache"], &dst, logger); err != nil {
return err return err
} }
@@ -326,146 +322,174 @@ func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
prometheus.CounterValue, prometheus.CounterValue,
dst[0].AsyncCopyReadsTotal, dst[0].AsyncCopyReadsTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.asyncDataMapsTotal, c.asyncDataMapsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].AsyncDataMapsTotal, dst[0].AsyncDataMapsTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.asyncFastReadsTotal, c.asyncFastReadsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].AsyncFastReadsTotal, dst[0].AsyncFastReadsTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.asyncMDLReadsTotal, c.asyncMDLReadsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].AsyncMDLReadsTotal, dst[0].AsyncMDLReadsTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.asyncPinReadsTotal, c.asyncPinReadsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].AsyncPinReadsTotal, dst[0].AsyncPinReadsTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.copyReadHitsTotal, c.copyReadHitsTotal,
prometheus.CounterValue, prometheus.GaugeValue,
dst[0].CopyReadHitsTotal, dst[0].CopyReadHitsTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.copyReadsTotal, c.copyReadsTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].CopyReadsTotal, dst[0].CopyReadsTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dataFlushesTotal, c.dataFlushesTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].DataFlushesTotal, dst[0].DataFlushesTotal,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dataFlushPagesTotal, c.dataFlushPagesTotal,
prometheus.CounterValue, prometheus.CounterValue,
dst[0].DataFlushPagesTotal, dst[0].DataFlushPagesTotal,
) )
ch <- prometheus.MustNewConstMetric(
c.dataMapPinsTotal,
prometheus.CounterValue,
dst[0].DataMapPinsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapsTotal,
prometheus.CounterValue,
dst[0].DataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadNotPossiblesTotal,
prometheus.CounterValue,
dst[0].FastReadNotPossiblesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadResourceMissesTotal,
prometheus.CounterValue,
dst[0].FastReadResourceMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadsTotal,
prometheus.CounterValue,
dst[0].FastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWriteFlushesTotal,
prometheus.CounterValue,
dst[0].LazyWriteFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWritePagesTotal,
prometheus.CounterValue,
dst[0].LazyWritePagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadHitsTotal,
prometheus.CounterValue,
dst[0].MDLReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadsTotal,
prometheus.CounterValue,
dst[0].MDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadHitsTotal,
prometheus.CounterValue,
dst[0].PinReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadsTotal,
prometheus.CounterValue,
dst[0].PinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.readAheadsTotal,
prometheus.CounterValue,
dst[0].ReadAheadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncCopyReadsTotal,
prometheus.CounterValue,
dst[0].SyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncDataMapsTotal,
prometheus.CounterValue,
dst[0].SyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncFastReadsTotal,
prometheus.CounterValue,
dst[0].SyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncMDLReadsTotal,
prometheus.CounterValue,
dst[0].SyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncPinReadsTotal,
prometheus.CounterValue,
dst[0].SyncPinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPages,
prometheus.GaugeValue,
dst[0].DirtyPages,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPageThreshold,
prometheus.GaugeValue,
dst[0].DirtyPageThreshold,
)
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dataMapHitsPercent, c.dataMapHitsPercent,
prometheus.GaugeValue, prometheus.GaugeValue,
dst[0].DataMapHitsPercent, dst[0].DataMapHitsPercent,
) )
ch <- prometheus.MustNewConstMetric(
c.dataMapPinsTotal,
prometheus.CounterValue,
dst[0].DataMapPinsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapsTotal,
prometheus.CounterValue,
dst[0].DataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPages,
prometheus.GaugeValue,
dst[0].DirtyPages,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPageThreshold,
prometheus.GaugeValue,
dst[0].DirtyPageThreshold,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadNotPossiblesTotal,
prometheus.CounterValue,
dst[0].FastReadNotPossiblesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadResourceMissesTotal,
prometheus.CounterValue,
dst[0].FastReadResourceMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadsTotal,
prometheus.CounterValue,
dst[0].FastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWriteFlushesTotal,
prometheus.CounterValue,
dst[0].LazyWriteFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWritePagesTotal,
prometheus.CounterValue,
dst[0].LazyWritePagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadHitsTotal,
prometheus.CounterValue,
dst[0].MDLReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadsTotal,
prometheus.CounterValue,
dst[0].MDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadHitsTotal,
prometheus.CounterValue,
dst[0].PinReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadsTotal,
prometheus.CounterValue,
dst[0].PinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.readAheadsTotal,
prometheus.CounterValue,
dst[0].ReadAheadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncCopyReadsTotal,
prometheus.CounterValue,
dst[0].SyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncDataMapsTotal,
prometheus.CounterValue,
dst[0].SyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncFastReadsTotal,
prometheus.CounterValue,
dst[0].SyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncMDLReadsTotal,
prometheus.CounterValue,
dst[0].SyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncPinReadsTotal,
prometheus.CounterValue,
dst[0].SyncPinReadsTotal,
)
return nil return nil
} }
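
Besides the alphabetical reordering of the metric emission, the functional fix in this file is windows_cache_copy_read_hits_total: the struct tag now points at the "Copy Read Hits %" perflib counter (likewise for the MDL and Pin hit counters), and the value is emitted with GaugeValue instead of CounterValue, presumably because a percentage is not a monotonically increasing count. A hedged sketch of that gauge-versus-counter distinction using plain client_golang calls; the metric names follow the diff, the values are made up:

```
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	copyReadHits := prometheus.NewDesc(
		prometheus.BuildFQName("windows", "cache", "copy_read_hits_total"),
		"(CopyReadHitsTotal)",
		nil, nil,
	)
	copyReads := prometheus.NewDesc(
		prometheus.BuildFQName("windows", "cache", "copy_reads_total"),
		"(CopyReadsTotal)",
		nil, nil,
	)

	// Mirrors the diff: the "%" counter is published with GaugeValue, while the
	// "/sec" counter stays a cumulative counter that Prometheus rates over time.
	hits := prometheus.MustNewConstMetric(copyReadHits, prometheus.GaugeValue, 97.5)
	reads := prometheus.MustNewConstMetric(copyReads, prometheus.CounterValue, 123456)

	fmt.Println(hits.Desc(), reads.Desc())
}
```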

View File

@@ -4,6 +4,7 @@ package collector
import ( import (
"errors" "errors"
"fmt"
"slices" "slices"
"strings" "strings"
@@ -29,11 +30,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk" "github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
"github.com/prometheus-community/windows_exporter/pkg/collector/logon" "github.com/prometheus-community/windows_exporter/pkg/collector/logon"
"github.com/prometheus-community/windows_exporter/pkg/collector/memory" "github.com/prometheus-community/windows_exporter/pkg/collector/memory"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_cluster" "github.com/prometheus-community/windows_exporter/pkg/collector/mscluster"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_network"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_node"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_resource"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_resourcegroup"
"github.com/prometheus-community/windows_exporter/pkg/collector/msmq" "github.com/prometheus-community/windows_exporter/pkg/collector/msmq"
"github.com/prometheus-community/windows_exporter/pkg/collector/mssql" "github.com/prometheus-community/windows_exporter/pkg/collector/mssql"
"github.com/prometheus-community/windows_exporter/pkg/collector/net" "github.com/prometheus-community/windows_exporter/pkg/collector/net"
@@ -67,10 +64,11 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/vmware_blast" "github.com/prometheus-community/windows_exporter/pkg/collector/vmware_blast"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/yusufpapurcu/wmi"
) )
// NewWithFlags To be called by the exporter for collector initialization before running kingpin.Parse. // NewWithFlags To be called by the exporter for collector initialization before running kingpin.Parse.
func NewWithFlags(app *kingpin.Application) Collectors { func NewWithFlags(app *kingpin.Application) *Collectors {
collectors := map[string]Collector{} collectors := map[string]Collector{}
for name, builder := range BuildersWithFlags { for name, builder := range BuildersWithFlags {
@@ -80,93 +78,78 @@ func NewWithFlags(app *kingpin.Application) Collectors {
return New(collectors) return New(collectors)
} }
func NewBuilderWithFlags[C Collector](fn BuilderWithFlags[C]) BuilderWithFlags[Collector] {
return func(app *kingpin.Application) Collector {
return fn(app)
}
}
// NewWithConfig To be called by the external libraries for collector initialization without running kingpin.Parse // NewWithConfig To be called by the external libraries for collector initialization without running kingpin.Parse
// //
//goland:noinspection GoUnusedExportedFunction //goland:noinspection GoUnusedExportedFunction
func NewWithConfig(logger log.Logger, config Config) Collectors { func NewWithConfig(config Config) *Collectors {
collectors := map[string]Collector{} collectors := map[string]Collector{}
collectors[ad.Name] = ad.New(logger, &config.AD) collectors[ad.Name] = ad.New(&config.AD)
collectors[adcs.Name] = adcs.New(logger, &config.ADCS) collectors[adcs.Name] = adcs.New(&config.ADCS)
collectors[adfs.Name] = adfs.New(logger, &config.ADFS) collectors[adfs.Name] = adfs.New(&config.ADFS)
collectors[cache.Name] = cache.New(logger, &config.Cache) collectors[cache.Name] = cache.New(&config.Cache)
collectors[container.Name] = container.New(logger, &config.Container) collectors[container.Name] = container.New(&config.Container)
collectors[cpu.Name] = cpu.New(logger, &config.CPU) collectors[cpu.Name] = cpu.New(&config.CPU)
collectors[cpu_info.Name] = cpu_info.New(logger, &config.CPUInfo) collectors[cpu_info.Name] = cpu_info.New(&config.CPUInfo)
collectors[cs.Name] = cs.New(logger, &config.Cs) collectors[cs.Name] = cs.New(&config.Cs)
collectors[dfsr.Name] = dfsr.New(logger, &config.DFSR) collectors[dfsr.Name] = dfsr.New(&config.DFSR)
collectors[dhcp.Name] = dhcp.New(logger, &config.Dhcp) collectors[dhcp.Name] = dhcp.New(&config.Dhcp)
collectors[diskdrive.Name] = diskdrive.New(logger, &config.DiskDrive) collectors[diskdrive.Name] = diskdrive.New(&config.DiskDrive)
collectors[dns.Name] = dns.New(logger, &config.DNS) collectors[dns.Name] = dns.New(&config.DNS)
collectors[exchange.Name] = exchange.New(logger, &config.Exchange) collectors[exchange.Name] = exchange.New(&config.Exchange)
collectors[fsrmquota.Name] = fsrmquota.New(logger, &config.Fsrmquota) collectors[fsrmquota.Name] = fsrmquota.New(&config.Fsrmquota)
collectors[hyperv.Name] = hyperv.New(logger, &config.Hyperv) collectors[hyperv.Name] = hyperv.New(&config.Hyperv)
collectors[iis.Name] = iis.New(logger, &config.IIS) collectors[iis.Name] = iis.New(&config.IIS)
collectors[license.Name] = license.New(logger, &config.License) collectors[license.Name] = license.New(&config.License)
collectors[logical_disk.Name] = logical_disk.New(logger, &config.LogicalDisk) collectors[logical_disk.Name] = logical_disk.New(&config.LogicalDisk)
collectors[logon.Name] = logon.New(logger, &config.Logon) collectors[logon.Name] = logon.New(&config.Logon)
collectors[memory.Name] = memory.New(logger, &config.Memory) collectors[memory.Name] = memory.New(&config.Memory)
collectors[mscluster_cluster.Name] = mscluster_cluster.New(logger, &config.MsclusterCluster) collectors[mscluster.Name] = mscluster.New(&config.Mscluster)
collectors[mscluster_network.Name] = mscluster_network.New(logger, &config.MsclusterNetwork) collectors[msmq.Name] = msmq.New(&config.Msmq)
collectors[mscluster_node.Name] = mscluster_node.New(logger, &config.MsclusterNode) collectors[mssql.Name] = mssql.New(&config.Mssql)
collectors[mscluster_resource.Name] = mscluster_resource.New(logger, &config.MsclusterResource) collectors[net.Name] = net.New(&config.Net)
collectors[mscluster_resourcegroup.Name] = mscluster_resourcegroup.New(logger, &config.MsclusterResourceGroup) collectors[netframework_clrexceptions.Name] = netframework_clrexceptions.New(&config.NetframeworkClrexceptions)
collectors[msmq.Name] = msmq.New(logger, &config.Msmq) collectors[netframework_clrinterop.Name] = netframework_clrinterop.New(&config.NetframeworkClrinterop)
collectors[mssql.Name] = mssql.New(logger, &config.Mssql) collectors[netframework_clrjit.Name] = netframework_clrjit.New(&config.NetframeworkClrjit)
collectors[net.Name] = net.New(logger, &config.Net) collectors[netframework_clrloading.Name] = netframework_clrloading.New(&config.NetframeworkClrloading)
collectors[netframework_clrexceptions.Name] = netframework_clrexceptions.New(logger, &config.NetframeworkClrexceptions) collectors[netframework_clrlocksandthreads.Name] = netframework_clrlocksandthreads.New(&config.NetframeworkClrlocksandthreads)
collectors[netframework_clrinterop.Name] = netframework_clrinterop.New(logger, &config.NetframeworkClrinterop) collectors[netframework_clrmemory.Name] = netframework_clrmemory.New(&config.NetframeworkClrmemory)
collectors[netframework_clrjit.Name] = netframework_clrjit.New(logger, &config.NetframeworkClrjit) collectors[netframework_clrremoting.Name] = netframework_clrremoting.New(&config.NetframeworkClrremoting)
collectors[netframework_clrloading.Name] = netframework_clrloading.New(logger, &config.NetframeworkClrloading) collectors[netframework_clrsecurity.Name] = netframework_clrsecurity.New(&config.NetframeworkClrsecurity)
collectors[netframework_clrlocksandthreads.Name] = netframework_clrlocksandthreads.New(logger, &config.NetframeworkClrlocksandthreads) collectors[nps.Name] = nps.New(&config.Nps)
collectors[netframework_clrmemory.Name] = netframework_clrmemory.New(logger, &config.NetframeworkClrmemory) collectors[os.Name] = os.New(&config.Os)
collectors[netframework_clrremoting.Name] = netframework_clrremoting.New(logger, &config.NetframeworkClrremoting) collectors[physical_disk.Name] = physical_disk.New(&config.PhysicalDisk)
collectors[netframework_clrsecurity.Name] = netframework_clrsecurity.New(logger, &config.NetframeworkClrsecurity) collectors[printer.Name] = printer.New(&config.Printer)
collectors[nps.Name] = nps.New(logger, &config.Nps) collectors[process.Name] = process.New(&config.Process)
collectors[os.Name] = os.New(logger, &config.Os) collectors[remote_fx.Name] = remote_fx.New(&config.RemoteFx)
collectors[physical_disk.Name] = physical_disk.New(logger, &config.PhysicalDisk) collectors[scheduled_task.Name] = scheduled_task.New(&config.ScheduledTask)
collectors[printer.Name] = printer.New(logger, &config.Printer) collectors[service.Name] = service.New(&config.Service)
collectors[process.Name] = process.New(logger, &config.Process) collectors[smb.Name] = smb.New(&config.SMB)
collectors[remote_fx.Name] = remote_fx.New(logger, &config.RemoteFx) collectors[smbclient.Name] = smbclient.New(&config.SMBClient)
collectors[scheduled_task.Name] = scheduled_task.New(logger, &config.ScheduledTask) collectors[smtp.Name] = smtp.New(&config.SMTP)
collectors[service.Name] = service.New(logger, &config.Service) collectors[system.Name] = system.New(&config.System)
collectors[smb.Name] = smb.New(logger, &config.SMB) collectors[teradici_pcoip.Name] = teradici_pcoip.New(&config.TeradiciPcoip)
collectors[smbclient.Name] = smbclient.New(logger, &config.SMBClient) collectors[tcp.Name] = tcp.New(&config.TCP)
collectors[smtp.Name] = smtp.New(logger, &config.SMTP) collectors[terminal_services.Name] = terminal_services.New(&config.TerminalServices)
collectors[system.Name] = system.New(logger, &config.System) collectors[textfile.Name] = textfile.New(&config.Textfile)
collectors[teradici_pcoip.Name] = teradici_pcoip.New(logger, &config.TeradiciPcoip) collectors[thermalzone.Name] = thermalzone.New(&config.Thermalzone)
collectors[tcp.Name] = tcp.New(logger, &config.TCP) collectors[time.Name] = time.New(&config.Time)
collectors[terminal_services.Name] = terminal_services.New(logger, &config.TerminalServices) collectors[vmware.Name] = vmware.New(&config.Vmware)
collectors[textfile.Name] = textfile.New(logger, &config.Textfile) collectors[vmware_blast.Name] = vmware_blast.New(&config.VmwareBlast)
collectors[thermalzone.Name] = thermalzone.New(logger, &config.Thermalzone)
collectors[time.Name] = time.New(logger, &config.Time)
collectors[vmware.Name] = vmware.New(logger, &config.Vmware)
collectors[vmware_blast.Name] = vmware_blast.New(logger, &config.VmwareBlast)
return New(collectors) return New(collectors)
} }
// New To be called by the external libraries for collector initialization. // New To be called by the external libraries for collector initialization.
func New(collectors Map) Collectors { func New(collectors Map) *Collectors {
return Collectors{ return &Collectors{
collectors: collectors, collectors: collectors,
wmiClient: &wmi.Client{
AllowMissingFields: true,
},
} }
} }
func (c *Collectors) SetLogger(logger log.Logger) { func (c *Collectors) SetPerfCounterQuery(logger log.Logger) error {
c.logger = logger
for _, collector := range c.collectors {
collector.SetLogger(logger)
}
}
func (c *Collectors) SetPerfCounterQuery() error {
var ( var (
err error err error
@@ -177,7 +160,7 @@ func (c *Collectors) SetPerfCounterQuery() error {
perfCounterDependencies := make([]string, 0, len(c.collectors)) perfCounterDependencies := make([]string, 0, len(c.collectors))
for _, collector := range c.collectors { for _, collector := range c.collectors {
perfCounterNames, err = collector.GetPerfCounter() perfCounterNames, err = collector.GetPerfCounter(logger)
if err != nil { if err != nil {
return err return err
} }
@@ -205,12 +188,17 @@ func (c *Collectors) Enable(enabledCollectors []string) {
} }
// Build To be called by the exporter for collector initialization. // Build To be called by the exporter for collector initialization.
func (c *Collectors) Build() error { func (c *Collectors) Build(logger log.Logger) error {
var err error var err error
c.wmiClient.SWbemServicesClient, err = wmi.InitializeSWbemServices(c.wmiClient)
if err != nil {
return fmt.Errorf("initialize SWbemServices: %w", err)
}
for _, collector := range c.collectors { for _, collector := range c.collectors {
if err = collector.Build(); err != nil { if err = collector.Build(logger, c.wmiClient); err != nil {
return err return fmt.Errorf("error build collector %s: %w", collector.GetName(), err)
} }
} }
@@ -232,7 +220,13 @@ func (c *Collectors) Close() error {
errs := make([]error, 0, len(c.collectors)) errs := make([]error, 0, len(c.collectors))
for _, collector := range c.collectors { for _, collector := range c.collectors {
if err := collector.Build(); err != nil { if err := collector.Close(); err != nil {
errs = append(errs, err)
}
}
if c.wmiClient != nil && c.wmiClient.SWbemServicesClient != nil {
if err := c.wmiClient.SWbemServicesClient.Close(); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
} }
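
For embedders, the visible API change in this file is that NewWithConfig no longer takes a logger, the Collectors struct owns a shared wmi.Client (created with AllowMissingFields: true), and Build both initializes the SWbemServices session and hands the client to every collector, with Close tearing it down again. A hedged sketch of how an external program might wire this up, assuming the pkg/collector import path shown above; the call order is illustrative:

```
package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/prometheus-community/windows_exporter/pkg/collector"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	// NewWithConfig no longer receives a logger; loggers are passed per call.
	collectors := collector.NewWithConfig(collector.ConfigDefaults)
	collectors.Enable([]string{"cpu", "cpu_info", "cache"})

	// Build initializes the shared SWbemServices-backed WMI client and hands
	// it to each collector, so it has to succeed before the first scrape.
	if err := collectors.Build(logger); err != nil {
		_ = logger.Log("msg", "failed to build collectors", "err", err)
		return
	}
	defer func() { _ = collectors.Close() }()

	if err := collectors.SetPerfCounterQuery(logger); err != nil {
		_ = logger.Log("msg", "failed to set perf counter query", "err", err)
		return
	}
}
```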

View File

@@ -21,11 +21,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk" "github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
"github.com/prometheus-community/windows_exporter/pkg/collector/logon" "github.com/prometheus-community/windows_exporter/pkg/collector/logon"
"github.com/prometheus-community/windows_exporter/pkg/collector/memory" "github.com/prometheus-community/windows_exporter/pkg/collector/memory"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_cluster" "github.com/prometheus-community/windows_exporter/pkg/collector/mscluster"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_network"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_node"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_resource"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_resourcegroup"
"github.com/prometheus-community/windows_exporter/pkg/collector/msmq" "github.com/prometheus-community/windows_exporter/pkg/collector/msmq"
"github.com/prometheus-community/windows_exporter/pkg/collector/mssql" "github.com/prometheus-community/windows_exporter/pkg/collector/mssql"
"github.com/prometheus-community/windows_exporter/pkg/collector/net" "github.com/prometheus-community/windows_exporter/pkg/collector/net"
@@ -80,11 +76,7 @@ type Config struct {
LogicalDisk logical_disk.Config `yaml:"logical_disk"` LogicalDisk logical_disk.Config `yaml:"logical_disk"`
Logon logon.Config `yaml:"logon"` Logon logon.Config `yaml:"logon"`
Memory memory.Config `yaml:"memory"` Memory memory.Config `yaml:"memory"`
MsclusterCluster mscluster_cluster.Config `yaml:"mscluster_cluster"` Mscluster mscluster.Config `yaml:"mscluster"`
MsclusterNetwork mscluster_network.Config `yaml:"mscluster_network"`
MsclusterNode mscluster_node.Config `yaml:"mscluster_node"`
MsclusterResource mscluster_resource.Config `yaml:"mscluster_resource"`
MsclusterResourceGroup mscluster_resourcegroup.Config `yaml:"mscluster_resourcegroup"` //nolint:tagliatelle
Msmq msmq.Config `yaml:"msmq"` Msmq msmq.Config `yaml:"msmq"`
Mssql mssql.Config `yaml:"mssql"` Mssql mssql.Config `yaml:"mssql"`
Net net.Config `yaml:"net"` Net net.Config `yaml:"net"`
@@ -142,11 +134,7 @@ var ConfigDefaults = Config{
LogicalDisk: logical_disk.ConfigDefaults, LogicalDisk: logical_disk.ConfigDefaults,
Logon: logon.ConfigDefaults, Logon: logon.ConfigDefaults,
Memory: memory.ConfigDefaults, Memory: memory.ConfigDefaults,
MsclusterCluster: mscluster_cluster.ConfigDefaults, Mscluster: mscluster.ConfigDefaults,
MsclusterNetwork: mscluster_network.ConfigDefaults,
MsclusterNode: mscluster_node.ConfigDefaults,
MsclusterResource: mscluster_resource.ConfigDefaults,
MsclusterResourceGroup: mscluster_resourcegroup.ConfigDefaults,
Msmq: msmq.ConfigDefaults, Msmq: msmq.ConfigDefaults,
Mssql: mssql.ConfigDefaults, Mssql: mssql.ConfigDefaults,
Net: net.ConfigDefaults, Net: net.ConfigDefaults,

View File

@@ -12,6 +12,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "container" const Name = "container"
@@ -23,7 +24,6 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for containers metrics. // A Collector is a Prometheus Collector for containers metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
// Presence // Presence
containerAvailable *prometheus.Desc containerAvailable *prometheus.Desc
@@ -57,7 +57,7 @@ type Collector struct {
} }
// New constructs a new Collector. // New constructs a new Collector.
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -66,8 +66,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -79,11 +77,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -91,7 +85,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.containerAvailable = prometheus.NewDesc( c.containerAvailable = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "available"), prometheus.BuildFQName(types.Namespace, Name, "available"),
"Available", "Available",
@@ -205,27 +199,28 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting collector metrics", "err", err) if err := c.collect(logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting collector metrics", "err", err)
return err return err
} }
return nil return nil
} }
// containerClose closes the container resource. // containerClose closes the container resource.
func (c *Collector) containerClose(container hcsshim.Container) { func (c *Collector) containerClose(logger log.Logger, container hcsshim.Container) {
err := container.Close() err := container.Close()
if err != nil { if err != nil {
_ = level.Error(c.logger).Log("err", err) _ = level.Error(logger).Log("err", err)
} }
} }
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(logger log.Logger, ch chan<- prometheus.Metric) error {
// Types Container is passed to get the containers compute systems only // Types Container is passed to get the containers compute systems only
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}}) containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
if err != nil { if err != nil {
_ = level.Error(c.logger).Log("msg", "Err in Getting containers", "err", err) _ = level.Error(logger).Log("msg", "Err in Getting containers", "err", err)
return err return err
} }
@@ -247,16 +242,16 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
func() { func() {
container, err := hcsshim.OpenContainer(containerDetails.ID) container, err := hcsshim.OpenContainer(containerDetails.ID)
if container != nil { if container != nil {
defer c.containerClose(container) defer c.containerClose(logger, container)
} }
if err != nil { if err != nil {
_ = level.Error(c.logger).Log("msg", "err in opening container", "containerId", containerDetails.ID, "err", err) _ = level.Error(logger).Log("msg", "err in opening container", "containerId", containerDetails.ID, "err", err)
return return
} }
cstats, err := container.Statistics() cstats, err := container.Statistics()
if err != nil { if err != nil {
_ = level.Error(c.logger).Log("msg", "err in fetching container Statistics", "containerId", containerDetails.ID, "err", err) _ = level.Error(logger).Log("msg", "err in fetching container Statistics", "containerId", containerDetails.ID, "err", err)
return return
} }
@@ -334,19 +329,19 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
hnsEndpoints, err := hcsshim.HNSListEndpointRequest() hnsEndpoints, err := hcsshim.HNSListEndpointRequest()
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", "Failed to collect network stats for containers") _ = level.Warn(logger).Log("msg", "Failed to collect network stats for containers")
return err return err
} }
if len(hnsEndpoints) == 0 { if len(hnsEndpoints) == 0 {
_ = level.Info(c.logger).Log("msg", "No network stats for containers to collect") _ = level.Info(logger).Log("msg", "No network stats for containers to collect")
return nil return nil
} }
for _, endpoint := range hnsEndpoints { for _, endpoint := range hnsEndpoints {
endpointStats, err := hcsshim.GetHNSEndpointStats(endpoint.Id) endpointStats, err := hcsshim.GetHNSEndpointStats(endpoint.Id)
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", "Failed to collect network stats for interface "+endpoint.Id, "err", err) _ = level.Warn(logger).Log("msg", "Failed to collect network stats for interface "+endpoint.Id, "err", err)
continue continue
} }
@@ -355,7 +350,7 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
endpointId := strings.ToUpper(endpoint.Id) endpointId := strings.ToUpper(endpoint.Id)
if !ok { if !ok {
_ = level.Warn(c.logger).Log("msg", "Failed to collect network stats for container "+containerId) _ = level.Warn(logger).Log("msg", "Failed to collect network stats for container "+containerId)
continue continue
} }

View File

@@ -11,6 +11,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/winversion" "github.com/prometheus-community/windows_exporter/pkg/winversion"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "cpu" const Name = "cpu"
@@ -21,7 +22,6 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
cStateSecondsTotal *prometheus.Desc cStateSecondsTotal *prometheus.Desc
timeTotal *prometheus.Desc timeTotal *prometheus.Desc
@@ -38,7 +38,7 @@ type Collector struct {
processorPrivilegedUtility *prometheus.Desc processorPrivilegedUtility *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -47,8 +47,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -60,12 +58,8 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name) if winversion.WindowsVersionFloat() > 6.05 {
}
func (c *Collector) GetPerfCounter() ([]string, error) {
if winversion.WindowsVersionFloat > 6.05 {
return []string{"Processor Information"}, nil return []string{"Processor Information"}, nil
} }
return []string{"Processor"}, nil return []string{"Processor"}, nil
@@ -75,7 +69,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.cStateSecondsTotal = prometheus.NewDesc( c.cStateSecondsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cstate_seconds_total"), prometheus.BuildFQName(types.Namespace, Name, "cstate_seconds_total"),
"Time spent in low-power idle state", "Time spent in low-power idle state",
@@ -107,7 +101,7 @@ func (c *Collector) Build() error {
// are added in later versions, so we aren't guaranteed to get all of // are added in later versions, so we aren't guaranteed to get all of
// them). // them).
// Value 6.05 was selected to split between Windows versions. // Value 6.05 was selected to split between Windows versions.
if winversion.WindowsVersionFloat < 6.05 { if winversion.WindowsVersionFloat() < 6.05 {
return nil return nil
} }
@@ -193,12 +187,13 @@ func (c *Collector) Build() error {
return nil return nil
} }
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if winversion.WindowsVersionFloat > 6.05 { logger = log.With(logger, "collector", Name)
return c.CollectFull(ctx, ch) if winversion.WindowsVersionFloat() > 6.05 {
return c.CollectFull(ctx, logger, ch)
} }
return c.CollectBasic(ctx, ch) return c.CollectBasic(ctx, logger, ch)
} }
type perflibProcessor struct { type perflibProcessor struct {
@@ -220,9 +215,10 @@ type perflibProcessor struct {
PercentUserTime float64 `perflib:"% User Time"` PercentUserTime float64 `perflib:"% User Time"`
} }
func (c *Collector) CollectBasic(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) CollectBasic(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
data := make([]perflibProcessor, 0) data := make([]perflibProcessor, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Processor"], &data, c.logger) err := perflib.UnmarshalObject(ctx.PerfObjects["Processor"], &data, logger)
if err != nil { if err != nil {
return err return err
} }
@@ -329,9 +325,10 @@ type perflibProcessorInformation struct {
UserTimeSeconds float64 `perflib:"% User Time"` UserTimeSeconds float64 `perflib:"% User Time"`
} }
func (c *Collector) CollectFull(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) CollectFull(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
data := make([]perflibProcessorInformation, 0) data := make([]perflibProcessorInformation, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Processor Information"], &data, c.logger) err := perflib.UnmarshalObject(ctx.PerfObjects["Processor Information"], &data, logger)
if err != nil { if err != nil {
return err return err
} }
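
WindowsVersionFloat is now called as a function rather than read as a package variable, and it keeps gating which perflib object the collector reads: "Processor Information" on newer Windows, plain "Processor" otherwise (the 6.05 split value is taken from the existing code). A hedged sketch of that gate, assuming the pkg/winversion import path from the diff:

```
package main

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/pkg/winversion"
)

// perfObjectForCPU picks the perflib object the CPU collector should read,
// mirroring the version check in Collect above.
func perfObjectForCPU() string {
	if winversion.WindowsVersionFloat() > 6.05 {
		return "Processor Information"
	}

	return "Processor"
}

func main() {
	fmt.Println("collecting from perflib object:", perfObjectForCPU())
}
```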

View File

@@ -11,14 +11,12 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const ( const (
Name = "cpu_info" Name = "cpu_info"
// If you are adding additional labels to the metric, make sure that they get added in here as well. See below for explanation.
win32ProcessorQuery = "SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name FROM Win32_Processor"
) )
type Config struct{} type Config struct{}
@@ -28,12 +26,19 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for a few WMI metrics in Win32_Processor. // A Collector is a Prometheus Collector for a few WMI metrics in Win32_Processor.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
cpuInfo *prometheus.Desc wmiClient *wmi.Client
cpuInfo *prometheus.Desc
cpuCoreCount *prometheus.Desc
cpuEnabledCoreCount *prometheus.Desc
cpuLogicalProcessorsCount *prometheus.Desc
cpuThreadCount *prometheus.Desc
cpuL2CacheSize *prometheus.Desc
cpuL3CacheSize *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -42,8 +47,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -55,11 +58,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -67,7 +66,12 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.cpuInfo = prometheus.NewDesc( c.cpuInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, "", Name), prometheus.BuildFQName(types.Namespace, "", Name),
"Labelled CPU information as provided by Win32_Processor", "Labelled CPU information as provided by Win32_Processor",
@@ -76,42 +80,93 @@ func (c *Collector) Build() error {
"device_id", "device_id",
"description", "description",
"family", "family",
"l2_cache_size",
"l3_cache_size",
"name", "name",
}, },
nil, nil,
) )
c.cpuThreadCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "thread"),
"Number of threads per CPU",
[]string{
"device_id",
},
nil,
)
c.cpuCoreCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "core"),
"Number of cores per CPU",
[]string{
"device_id",
},
nil,
)
c.cpuEnabledCoreCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "enabled_core"),
"Number of enabled cores per CPU",
[]string{
"device_id",
},
nil,
)
c.cpuLogicalProcessorsCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "logical_processor"),
"Number of logical processors per CPU",
[]string{
"device_id",
},
nil,
)
c.cpuL2CacheSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "l2_cache_size"),
"Size of L2 cache per CPU",
[]string{
"device_id",
},
nil,
)
c.cpuL3CacheSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "l3_cache_size"),
"Size of L3 cache per CPU",
[]string{
"device_id",
},
nil,
)
return nil return nil
} }
type win32_Processor struct { type win32Processor struct {
Architecture uint32 Architecture uint32
DeviceID string DeviceID string
Description string Description string
Family uint16 Family uint16
L2CacheSize uint32 L2CacheSize uint32
L3CacheSize uint32 L3CacheSize uint32
Name string Name string
ThreadCount uint32
NumberOfCores uint32
NumberOfEnabledCore uint32
NumberOfLogicalProcessors uint32
} }
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cpu_info metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting cpu_info metrics", "err", err)
return err return err
} }
return nil return nil
} }
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []win32_Processor var dst []win32Processor
// We use a static query here because the provided methods in wmi.go all issue a SELECT *; // We use a static query here because the provided methods in wmi.go all issue a SELECT *;
// This results in the time-consuming LoadPercentage field being read which seems to measure each CPU // This results in the time-consuming LoadPercentage field being read which seems to measure each CPU
// serially over a 1 second interval, so the scrape time is at least 1s * num_sockets // serially over a 1 second interval, so the scrape time is at least 1s * num_sockets
if err := wmi.Query(win32ProcessorQuery, &dst); err != nil { if err := c.wmiClient.Query("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor", &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {
@@ -128,10 +183,44 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
strings.TrimRight(processor.DeviceID, " "), strings.TrimRight(processor.DeviceID, " "),
strings.TrimRight(processor.Description, " "), strings.TrimRight(processor.Description, " "),
strconv.Itoa(int(processor.Family)), strconv.Itoa(int(processor.Family)),
strconv.Itoa(int(processor.L2CacheSize)),
strconv.Itoa(int(processor.L3CacheSize)),
strings.TrimRight(processor.Name, " "), strings.TrimRight(processor.Name, " "),
) )
ch <- prometheus.MustNewConstMetric(
c.cpuCoreCount,
prometheus.GaugeValue,
float64(processor.NumberOfCores),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuEnabledCoreCount,
prometheus.GaugeValue,
float64(processor.NumberOfEnabledCore),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuLogicalProcessorsCount,
prometheus.GaugeValue,
float64(processor.NumberOfLogicalProcessors),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuThreadCount,
prometheus.GaugeValue,
float64(processor.ThreadCount),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuL2CacheSize,
prometheus.GaugeValue,
float64(processor.L2CacheSize),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuL3CacheSize,
prometheus.GaugeValue,
float64(processor.L3CacheSize),
strings.TrimRight(processor.DeviceID, " "),
)
} }
return nil return nil
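
The extended processor information comes from a single static WQL query (still keeping LoadPercentage out of the SELECT to avoid the roughly one-second-per-socket sampling cost noted in the comment), now issued through the shared SWbemServices-backed client instead of the package-level default. A hedged, standalone sketch of that query path with github.com/yusufpapurcu/wmi; the field list follows the diff:

```
package main

import (
	"fmt"

	"github.com/yusufpapurcu/wmi"
)

// win32Processor mirrors the fields selected by the collector; older Windows
// versions may lack some of them, which AllowMissingFields tolerates.
type win32Processor struct {
	DeviceID                  string
	Name                      string
	NumberOfCores             uint32
	NumberOfEnabledCore       uint32
	NumberOfLogicalProcessors uint32
	ThreadCount               uint32
	L2CacheSize               uint32
	L3CacheSize               uint32
}

func main() {
	client := &wmi.Client{AllowMissingFields: true}

	// Reuse one SWbemServices session instead of the default WMI connection.
	swbem, err := wmi.InitializeSWbemServices(client)
	if err != nil {
		fmt.Println("init SWbemServices:", err)
		return
	}
	client.SWbemServicesClient = swbem

	defer func() { _ = swbem.Close() }()

	var dst []win32Processor

	query := "SELECT DeviceId, Name, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors, ThreadCount, L2CacheSize, L3CacheSize FROM Win32_Processor"
	if err := client.Query(query, &dst); err != nil {
		fmt.Println("query:", err)
		return
	}

	for _, p := range dst {
		fmt.Printf("%s: %d cores, %d threads, L2 %d KB, L3 %d KB\n",
			p.DeviceID, p.NumberOfCores, p.ThreadCount, p.L2CacheSize, p.L3CacheSize)
	}
}
```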

View File

@@ -9,6 +9,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/headers/sysinfoapi" "github.com/prometheus-community/windows_exporter/pkg/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "cs" const Name = "cs"
@@ -20,14 +21,13 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI metrics. // A Collector is a Prometheus Collector for WMI metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
physicalMemoryBytes *prometheus.Desc physicalMemoryBytes *prometheus.Desc
logicalProcessors *prometheus.Desc logicalProcessors *prometheus.Desc
hostname *prometheus.Desc hostname *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -36,8 +36,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -49,11 +47,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -61,7 +55,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.logicalProcessors = prometheus.NewDesc( c.logicalProcessors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "logical_processors"), prometheus.BuildFQName(types.Namespace, Name, "logical_processors"),
"ComputerSystem.NumberOfLogicalProcessors", "ComputerSystem.NumberOfLogicalProcessors",
@@ -89,9 +83,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting cs metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting cs metrics", "err", err)
return err return err
} }
return nil return nil

View File

@@ -12,6 +12,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "dfsr" const Name = "dfsr"
@@ -27,7 +28,6 @@ var ConfigDefaults = Config{
// Collector contains the metric and state data of the DFSR collectors. // Collector contains the metric and state data of the DFSR collectors.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
// connection source // connection source
connectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc connectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
@@ -80,7 +80,7 @@ type Collector struct {
dfsrChildCollectors []dfsrCollectorFunc dfsrChildCollectors []dfsrCollectorFunc
} }
type dfsrCollectorFunc func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error type dfsrCollectorFunc func(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error
// Map Perflib sources to DFSR Collector names // Map Perflib sources to DFSR Collector names
// e.g, volume -> DFS Replication Service Volumes. // e.g, volume -> DFS Replication Service Volumes.
@@ -98,7 +98,7 @@ func dfsrGetPerfObjectName(collector string) string {
return prefix + suffix return prefix + suffix
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -111,8 +111,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -139,11 +137,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
// Perflib sources are dynamic, depending on the enabled child collectors // Perflib sources are dynamic, depending on the enabled child collectors
expandedChildCollectors := slices.Compact(c.config.CollectorsEnabled) expandedChildCollectors := slices.Compact(c.config.CollectorsEnabled)
perflibDependencies := make([]string, 0, len(expandedChildCollectors)) perflibDependencies := make([]string, 0, len(expandedChildCollectors))
@@ -159,8 +153,10 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, _ *wmi.Client) error {
_ = level.Info(c.logger).Log("msg", "dfsr collector is in an experimental state! Metrics for this collector have not been tested.") logger = log.With(logger, "collector", Name)
_ = level.Info(logger).Log("msg", "dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
// connection // connection
c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc( c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
@@ -479,9 +475,10 @@ func (c *Collector) getDFSRChildCollectors(enabledCollectors []string) []dfsrCol
// Collect implements the Collector interface. // Collect implements the Collector interface.
// Sends metric values for each metric to the provided prometheus Metric channel. // Sends metric values for each metric to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
for _, fn := range c.dfsrChildCollectors { for _, fn := range c.dfsrChildCollectors {
err := fn(ctx, ch) err := fn(ctx, logger, ch)
if err != nil { if err != nil {
return err return err
} }
@@ -504,9 +501,10 @@ type PerflibDFSRConnection struct {
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"` SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
} }
func (c *Collector) collectConnection(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectConnection(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []PerflibDFSRConnection var dst []PerflibDFSRConnection
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replication Connections"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replication Connections"], &dst, logger); err != nil {
return err return err
} }
@@ -610,9 +608,10 @@ type perflibDFSRFolder struct {
UpdatesDroppedTotal float64 `perflib:"Updates Dropped"` UpdatesDroppedTotal float64 `perflib:"Updates Dropped"`
} }
func (c *Collector) collectFolder(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectFolder(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []perflibDFSRFolder var dst []perflibDFSRFolder
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replicated Folders"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replicated Folders"], &dst, logger); err != nil {
return err return err
} }
@@ -820,9 +819,10 @@ type perflibDFSRVolume struct {
USNJournalUnreadPercentage float64 `perflib:"USN Journal Records Unread Percentage"` USNJournalUnreadPercentage float64 `perflib:"USN Journal Records Unread Percentage"`
} }
func (c *Collector) collectVolume(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectVolume(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []perflibDFSRVolume var dst []perflibDFSRVolume
if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replication Service volumes"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["DFS Replication Service volumes"], &dst, logger); err != nil {
return err return err
} }
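
The DFSR collector dispatches over a slice of child-collector functions, and the dfsrCollectorFunc type now carries the logger as a parameter too. A stripped-down, hedged sketch of that dispatch shape (no perflib types, illustrative names):

```
package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

// childCollectorFunc mirrors the shape of dfsrCollectorFunc: each enabled
// child source is a function that receives the scrape-scoped logger.
type childCollectorFunc func(logger log.Logger) error

func collectConnection(logger log.Logger) error {
	_ = level.Debug(logger).Log("msg", "collecting DFS Replication Connections")
	return nil
}

func collectFolder(logger log.Logger) error {
	_ = level.Debug(logger).Log("msg", "collecting DFS Replicated Folders")
	return nil
}

func main() {
	logger := log.With(log.NewLogfmtLogger(os.Stderr), "collector", "dfsr")

	// The real collector builds this slice from the enabled child collectors.
	children := []childCollectorFunc{collectConnection, collectFolder}
	for _, fn := range children {
		if err := fn(logger); err != nil {
			_ = level.Error(logger).Log("err", err)
			return
		}
	}
}
```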

View File

@@ -8,6 +8,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "dhcp" const Name = "dhcp"
@@ -19,7 +20,6 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector perflib DHCP metrics. // A Collector is a Prometheus Collector perflib DHCP metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
acksTotal *prometheus.Desc acksTotal *prometheus.Desc
activeQueueLength *prometheus.Desc activeQueueLength *prometheus.Desc
@@ -48,7 +48,7 @@ type Collector struct {
requestsTotal *prometheus.Desc requestsTotal *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -57,8 +57,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -70,11 +68,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"DHCP Server"}, nil return []string{"DHCP Server"}, nil
} }
@@ -82,7 +76,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.packetsReceivedTotal = prometheus.NewDesc( c.packetsReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"), prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
"Total number of packets received by the DHCP server (PacketsReceivedTotal)", "Total number of packets received by the DHCP server (PacketsReceivedTotal)",
@@ -267,9 +261,10 @@ type dhcpPerf struct {
FailoverBndupdDropped float64 `perflib:"Failover: BndUpd Dropped."` FailoverBndupdDropped float64 `perflib:"Failover: BndUpd Dropped."`
} }
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dhcpPerfs []dhcpPerf var dhcpPerfs []dhcpPerf
if err := perflib.UnmarshalObject(ctx.PerfObjects["DHCP Server"], &dhcpPerfs, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["DHCP Server"], &dhcpPerfs, logger); err != nil {
return err return err
} }
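Taken together, the dhcp hunks outline the collector contract after this refactor: `New` no longer takes a logger, `GetPerfCounter` and `Build` accept one (`Build` also receives the shared `*wmi.Client`), and `Collect` gets the request-scoped logger. A hedged interface sketch of that shape; the exact interface in the repository may differ, and `ScrapeContext` is a stand-in for `types.ScrapeContext`:

```go
package collector

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
)

// ScrapeContext is a placeholder for types.ScrapeContext, whose fields are
// not shown in these hunks.
type ScrapeContext struct{}

// Collector approximates the per-collector contract implied by the diff:
// loggers and the shared WMI client are injected per call or at Build time,
// never stored by New.
type Collector interface {
	GetName() string
	GetPerfCounter(logger log.Logger) ([]string, error)
	Build(logger log.Logger, wmiClient *wmi.Client) error
	Collect(ctx *ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error
	Close() error
}
```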

View File

@@ -10,8 +10,8 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const ( const (
@@ -25,8 +25,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for a few WMI metrics in Win32_DiskDrive. // A Collector is a Prometheus Collector for a few WMI metrics in Win32_DiskDrive.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
availability *prometheus.Desc availability *prometheus.Desc
diskInfo *prometheus.Desc diskInfo *prometheus.Desc
@@ -35,7 +35,7 @@ type Collector struct {
status *prometheus.Desc status *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -44,8 +44,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -57,11 +55,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -69,7 +63,12 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.diskInfo = prometheus.NewDesc( c.diskInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"), prometheus.BuildFQName(types.Namespace, Name, "info"),
"General drive information", "General drive information",
@@ -162,9 +161,10 @@ var (
) )
// Collect sends the metric values for each metric to the provided prometheus Metric channel. // Collect sends the metric values for each metric to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting disk_drive_info metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting disk_drive_info metrics", "err", err)
return err return err
} }
return nil return nil
@@ -173,7 +173,7 @@ func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []win32_DiskDrive var dst []win32_DiskDrive
if err := wmi.Query(win32DiskQuery, &dst); err != nil { if err := c.wmiClient.Query(win32DiskQuery, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {
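The disk_drive hunks show the second half of the refactor: WMI-backed collectors get a `*wmi.Client` injected in `Build`, guard against a nil client, and issue explicit WQL through it instead of using the package-level default connection. A compact sketch under those assumptions (the struct keeps only two `Win32_DiskDrive` properties):

```go
package diskdrive

import (
	"errors"

	"github.com/yusufpapurcu/wmi"
)

// win32DiskDrive keeps a small subset of Win32_DiskDrive properties.
type win32DiskDrive struct {
	Name string
	Size uint64
}

type collector struct {
	wmiClient *wmi.Client
}

// build mirrors the nil-guard added in the diff before storing the client.
func (c *collector) build(wmiClient *wmi.Client) error {
	if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
		return errors.New("wmiClient or SWbemServicesClient is nil")
	}
	c.wmiClient = wmiClient
	return nil
}

// collect issues an explicit WQL query via the injected client, replacing the
// old pattern of generating a query with QueryAll and running it on the
// default wmi connection.
func (c *collector) collect() ([]win32DiskDrive, error) {
	var dst []win32DiskDrive
	if err := c.wmiClient.Query("SELECT Name, Size FROM Win32_DiskDrive", &dst); err != nil {
		return nil, err
	}
	return dst, nil
}
```

Keeping one long-lived `SWbemServices` connection per process, rather than a fresh default connection per query, is the apparent motivation for injecting the client through `Build`.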

View File

@@ -9,8 +9,8 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "dns" const Name = "dns"
@@ -21,8 +21,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
dynamicUpdatesFailures *prometheus.Desc dynamicUpdatesFailures *prometheus.Desc
dynamicUpdatesQueued *prometheus.Desc dynamicUpdatesQueued *prometheus.Desc
@@ -48,7 +48,7 @@ type Collector struct {
zoneTransferSuccessSent *prometheus.Desc zoneTransferSuccessSent *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -57,8 +57,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -70,11 +68,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -82,7 +76,13 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.zoneTransferRequestsReceived = prometheus.NewDesc( c.zoneTransferRequestsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"), prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
"Number of zone transfer requests (AXFR/IXFR) received by the master DNS server", "Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
@@ -220,9 +220,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting dns metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting dns metrics", "err", err)
return err return err
} }
return nil return nil
@@ -276,8 +277,7 @@ type Win32_PerfRawData_DNS_DNS struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_DNS_DNS var dst []Win32_PerfRawData_DNS_DNS
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_DNS_DNS", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {

View File

@@ -5,7 +5,6 @@ package exchange
import ( import (
"fmt" "fmt"
"os" "os"
"slices"
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
@@ -14,6 +13,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "exchange" const Name = "exchange"
@@ -39,7 +39,6 @@ var ConfigDefaults = Config{
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
activeMailboxDeliveryQueueLength *prometheus.Desc activeMailboxDeliveryQueueLength *prometheus.Desc
activeSyncRequestsPerSec *prometheus.Desc activeSyncRequestsPerSec *prometheus.Desc
@@ -79,11 +78,9 @@ type Collector struct {
unreachableQueueLength *prometheus.Desc unreachableQueueLength *prometheus.Desc
userCount *prometheus.Desc userCount *prometheus.Desc
yieldedTasks *prometheus.Desc yieldedTasks *prometheus.Desc
enabledCollectors []string
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -96,8 +93,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -163,11 +158,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{ return []string{
"MSExchange ADAccess Processes", "MSExchange ADAccess Processes",
"MSExchangeTransport Queues", "MSExchangeTransport Queues",
@@ -186,7 +177,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
// desc creates a new prometheus description // desc creates a new prometheus description
desc := func(metricName string, description string, labels ...string) *prometheus.Desc { desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
return prometheus.NewDesc( return prometheus.NewDesc(
@@ -236,24 +227,13 @@ func (c *Collector) Build() error {
c.syncCommandsPerSec = desc("activesync_sync_cmds_total", "Number of sync commands processed per second. Clients use this command to synchronize items within a folder") c.syncCommandsPerSec = desc("activesync_sync_cmds_total", "Number of sync commands processed per second. Clients use this command to synchronize items within a folder")
c.activeUserCountMapiHttpEmsMDB = desc("mapihttp_emsmdb_active_user_count", "Number of unique outlook users that have shown some kind of activity in the last 2 minutes") c.activeUserCountMapiHttpEmsMDB = desc("mapihttp_emsmdb_active_user_count", "Number of unique outlook users that have shown some kind of activity in the last 2 minutes")
c.enabledCollectors = make([]string, 0, len(ConfigDefaults.CollectorsEnabled))
for _, collectorName := range c.config.CollectorsEnabled {
if !slices.Contains(ConfigDefaults.CollectorsEnabled, collectorName) {
return fmt.Errorf("unknown exchange collector: %s", collectorName)
}
c.enabledCollectors = append(c.enabledCollectors, collectorName)
}
c.enabledCollectors = slices.Clip(c.enabledCollectors)
return nil return nil
} }
// Collect collects exchange metrics and sends them to prometheus. // Collect collects exchange metrics and sends them to prometheus.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
collectorFuncs := map[string]func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error{ logger = log.With(logger, "collector", Name)
collectorFuncs := map[string]func(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error{
"ADAccessProcesses": c.collectADAccessProcesses, "ADAccessProcesses": c.collectADAccessProcesses,
"TransportQueues": c.collectTransportQueues, "TransportQueues": c.collectTransportQueues,
"HttpProxy": c.collectHTTPProxy, "HttpProxy": c.collectHTTPProxy,
@@ -266,9 +246,9 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
"MapiHttpEmsmdb": c.collectMapiHttpEmsmdb, "MapiHttpEmsmdb": c.collectMapiHttpEmsmdb,
} }
for _, collectorName := range c.enabledCollectors { for _, collectorName := range c.config.CollectorsEnabled {
if err := collectorFuncs[collectorName](ctx, ch); err != nil { if err := collectorFuncs[collectorName](ctx, logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "Error in "+collectorName, "err", err) _ = level.Error(logger).Log("msg", "Error in "+collectorName, "err", err)
return err return err
} }
} }
@@ -286,9 +266,10 @@ type perflibADAccessProcesses struct {
LongRunningLDAPOperationsPerMin float64 `perflib:"Long Running LDAP Operations/min"` LongRunningLDAPOperationsPerMin float64 `perflib:"Long Running LDAP Operations/min"`
} }
func (c *Collector) collectADAccessProcesses(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectADAccessProcesses(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibADAccessProcesses var data []perflibADAccessProcesses
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange ADAccess Processes"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange ADAccess Processes"], &data, logger); err != nil {
return err return err
} }
@@ -344,9 +325,10 @@ type perflibAvailabilityService struct {
RequestsSec float64 `perflib:"Availability Requests (sec)"` RequestsSec float64 `perflib:"Availability Requests (sec)"`
} }
func (c *Collector) collectAvailabilityService(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectAvailabilityService(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibAvailabilityService var data []perflibAvailabilityService
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange Availability Service"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange Availability Service"], &data, logger); err != nil {
return err return err
} }
@@ -372,9 +354,10 @@ type perflibHTTPProxy struct {
ProxyRequestsPerSec float64 `perflib:"Proxy Requests/Sec"` ProxyRequestsPerSec float64 `perflib:"Proxy Requests/Sec"`
} }
func (c *Collector) collectHTTPProxy(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectHTTPProxy(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibHTTPProxy var data []perflibHTTPProxy
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange HttpProxy"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange HttpProxy"], &data, logger); err != nil {
return err return err
} }
@@ -426,9 +409,10 @@ type perflibOWA struct {
RequestsPerSec float64 `perflib:"Requests/sec"` RequestsPerSec float64 `perflib:"Requests/sec"`
} }
func (c *Collector) collectOWA(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectOWA(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibOWA var data []perflibOWA
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange OWA"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange OWA"], &data, logger); err != nil {
return err return err
} }
@@ -454,9 +438,10 @@ type perflibActiveSync struct {
SyncCommandsPerSec float64 `perflib:"Sync Commands/sec"` SyncCommandsPerSec float64 `perflib:"Sync Commands/sec"`
} }
func (c *Collector) collectActiveSync(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectActiveSync(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibActiveSync var data []perflibActiveSync
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange ActiveSync"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange ActiveSync"], &data, logger); err != nil {
return err return err
} }
@@ -490,9 +475,10 @@ type perflibRPCClientAccess struct {
UserCount float64 `perflib:"User Count"` UserCount float64 `perflib:"User Count"`
} }
func (c *Collector) collectRPC(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectRPC(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibRPCClientAccess var data []perflibRPCClientAccess
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange RpcClientAccess"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange RpcClientAccess"], &data, logger); err != nil {
return err return err
} }
@@ -546,9 +532,10 @@ type perflibTransportQueues struct {
PoisonQueueLength float64 `perflib:"Poison Queue Length"` PoisonQueueLength float64 `perflib:"Poison Queue Length"`
} }
func (c *Collector) collectTransportQueues(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectTransportQueues(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibTransportQueues var data []perflibTransportQueues
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchangeTransport Queues"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchangeTransport Queues"], &data, logger); err != nil {
return err return err
} }
@@ -620,9 +607,10 @@ type perflibWorkloadManagementWorkloads struct {
IsActive float64 `perflib:"Active"` IsActive float64 `perflib:"Active"`
} }
func (c *Collector) collectWorkloadManagementWorkloads(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectWorkloadManagementWorkloads(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibWorkloadManagementWorkloads var data []perflibWorkloadManagementWorkloads
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange WorkloadManagement Workloads"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange WorkloadManagement Workloads"], &data, logger); err != nil {
return err return err
} }
@@ -671,9 +659,10 @@ type perflibAutodiscover struct {
RequestsPerSec float64 `perflib:"Requests/sec"` RequestsPerSec float64 `perflib:"Requests/sec"`
} }
func (c *Collector) collectAutoDiscover(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectAutoDiscover(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibAutodiscover var data []perflibAutodiscover
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchangeAutodiscover"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchangeAutodiscover"], &data, logger); err != nil {
return err return err
} }
for _, autodisc := range data { for _, autodisc := range data {
@@ -691,9 +680,10 @@ type perflibMapiHttpEmsmdb struct {
ActiveUserCount float64 `perflib:"Active User Count"` ActiveUserCount float64 `perflib:"Active User Count"`
} }
func (c *Collector) collectMapiHttpEmsmdb(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectMapiHttpEmsmdb(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibMapiHttpEmsmdb var data []perflibMapiHttpEmsmdb
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange MapiHttp Emsmdb"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange MapiHttp Emsmdb"], &data, logger); err != nil {
return err return err
} }
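In the exchange collector, `Build` no longer pre-validates an `enabledCollectors` slice; `Collect` simply dispatches over `c.config.CollectorsEnabled` using a map of sub-collector functions. A hedged sketch of that dispatch shape (the scrape context and metric channel are elided for brevity, and the unknown-name guard is an assumption, not shown in the diff):

```go
package exchange

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

// collectFn is the shape shared by the per-feature helpers above, with the
// scrape context and metric channel left out for brevity.
type collectFn func(logger log.Logger) error

type collector struct {
	collectorsEnabled []string
}

// collect dispatches to each enabled sub-collector by name, as the reworked
// Collect does.
func (c *collector) collect(logger log.Logger) error {
	collectorFuncs := map[string]collectFn{
		"ADAccessProcesses": func(log.Logger) error { return nil },
		"TransportQueues":   func(log.Logger) error { return nil },
	}

	for _, name := range c.collectorsEnabled {
		fn, ok := collectorFuncs[name]
		if !ok {
			// Assumed guard: the hunks above do not show how unknown names
			// are handled after the Build-time validation was removed.
			return fmt.Errorf("unknown exchange collector: %s", name)
		}

		if err := fn(logger); err != nil {
			_ = level.Error(logger).Log("msg", "Error in "+name, "err", err)
			return err
		}
	}

	return nil
}
```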

View File

@@ -3,13 +3,15 @@
package fsrmquota package fsrmquota
import ( import (
"errors"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "fsrmquota" const Name = "fsrmquota"
@@ -19,8 +21,8 @@ type Config struct{}
var ConfigDefaults = Config{} var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
quotasCount *prometheus.Desc quotasCount *prometheus.Desc
peakUsage *prometheus.Desc peakUsage *prometheus.Desc
@@ -34,7 +36,7 @@ type Collector struct {
template *prometheus.Desc template *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -43,8 +45,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -56,11 +56,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -68,7 +64,13 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.quotasCount = prometheus.NewDesc( c.quotasCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "count"), prometheus.BuildFQName(types.Namespace, Name, "count"),
"Number of Quotas", "Number of Quotas",
@@ -128,9 +130,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting fsrmquota metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting fsrmquota metrics", "err", err)
return err return err
} }
return nil return nil
@@ -155,11 +158,9 @@ type MSFT_FSRMQuota struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []MSFT_FSRMQuota var dst []MSFT_FSRMQuota
q := wmi.QueryAll(&dst, c.logger)
var count int var count int
if err := wmi.QueryNamespace(q, &dst, "root/microsoft/windows/fsrm"); err != nil { if err := c.wmiClient.Query("SELECT * FROM MSFT_FSRMQuota", &dst, nil, "root/microsoft/windows/fsrm"); err != nil {
return err return err
} }
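fsrmquota is the one collector here that queries a non-default WMI namespace. With the injected client, the namespace is supplied as a connect-server argument after a `nil` placeholder for the server, exactly as the hunk above does. A minimal sketch with a trimmed-down struct (the field selection is illustrative):

```go
package fsrmquota

import "github.com/yusufpapurcu/wmi"

// msftFSRMQuota keeps only a couple of MSFT_FSRMQuota properties for brevity.
type msftFSRMQuota struct {
	Path string
	Size uint64
}

// queryQuotas runs the namespaced query: the nil argument keeps the default
// server and credentials, and the final argument selects the
// root/microsoft/windows/fsrm namespace.
func queryQuotas(client *wmi.Client) ([]msftFSRMQuota, error) {
	var dst []msftFSRMQuota
	if err := client.Query("SELECT * FROM MSFT_FSRMQuota", &dst, nil, "root/microsoft/windows/fsrm"); err != nil {
		return nil, err
	}
	return dst, nil
}
```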

View File

@@ -11,13 +11,14 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/client_golang/prometheus/collectors/version"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
) )
func (c *Collectors) BuildServeHTTP(disableExporterMetrics bool, timeoutMargin float64) http.HandlerFunc { func (c *Collectors) BuildServeHTTP(logger log.Logger, disableExporterMetrics bool, timeoutMargin float64) http.HandlerFunc {
collectorFactory := func(timeout time.Duration, requestedCollectors []string) (error, *Prometheus) { collectorFactory := func(timeout time.Duration, requestedCollectors []string) (error, *Prometheus) {
filteredCollectors := make(map[string]Collector) filteredCollectors := make(map[string]Collector)
// scrape all enabled collectors if no collector is requested // scrape all enabled collectors if no collector is requested
@@ -33,15 +34,16 @@ func (c *Collectors) BuildServeHTTP(disableExporterMetrics bool, timeoutMargin f
} }
filtered := Collectors{ filtered := Collectors{
logger: c.logger,
collectors: filteredCollectors, collectors: filteredCollectors,
perfCounterQuery: c.perfCounterQuery, perfCounterQuery: c.perfCounterQuery,
} }
return nil, NewPrometheus(timeout, &filtered, c.logger) return nil, NewPrometheus(timeout, &filtered, logger)
} }
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
logger := log.With(logger, "remote", r.RemoteAddr, "correlation_id", uuid.New().String())
const defaultTimeout = 10.0 const defaultTimeout = 10.0
var timeoutSeconds float64 var timeoutSeconds float64
@@ -49,7 +51,7 @@ func (c *Collectors) BuildServeHTTP(disableExporterMetrics bool, timeoutMargin f
var err error var err error
timeoutSeconds, err = strconv.ParseFloat(v, 64) timeoutSeconds, err = strconv.ParseFloat(v, 64)
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f", v, defaultTimeout)) _ = level.Warn(logger).Log("msg", fmt.Sprintf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f", v, defaultTimeout))
} }
} }
if timeoutSeconds == 0 { if timeoutSeconds == 0 {
@@ -60,9 +62,9 @@ func (c *Collectors) BuildServeHTTP(disableExporterMetrics bool, timeoutMargin f
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
err, wc := collectorFactory(time.Duration(timeoutSeconds*float64(time.Second)), r.URL.Query()["collect[]"]) err, wc := collectorFactory(time.Duration(timeoutSeconds*float64(time.Second)), r.URL.Query()["collect[]"])
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", "Couldn't create filtered metrics handler", "err", err) _ = level.Warn(logger).Log("msg", "Couldn't create filtered metrics handler", "err", err)
w.WriteHeader(http.StatusBadRequest) w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(fmt.Sprintf("Couldn't create filtered metrics handler: %s", err))) //nolint:errcheck _, _ = w.Write([]byte(fmt.Sprintf("Couldn't create filtered metrics handler: %s", err)))
return return
} }
@@ -76,7 +78,7 @@ func (c *Collectors) BuildServeHTTP(disableExporterMetrics bool, timeoutMargin f
} }
h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{ h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
ErrorLog: stdlog.New(log.NewStdlibAdapter(level.Error(c.logger)), "", stdlog.Lshortfile), ErrorLog: stdlog.New(log.NewStdlibAdapter(level.Error(logger)), "", stdlog.Lshortfile),
}) })
h.ServeHTTP(w, r) h.ServeHTTP(w, r)
} }
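The handler change above is where the context-aware logger originates: every scrape request gets a logger tagged with the client address and a fresh correlation id, and that logger is handed to the filtered collector set instead of the handler's own stored logger. A small sketch of just the wrapping step, assuming go-kit/log and google/uuid:

```go
package server

import (
	"net/http"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/google/uuid"
)

// withRequestLogger shows the per-request scoping added in the diff: every
// scrape gets its own correlation_id so log lines belonging to one request
// can be grouped together.
func withRequestLogger(base log.Logger, next func(log.Logger, http.ResponseWriter, *http.Request)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		logger := log.With(base, "remote", r.RemoteAddr, "correlation_id", uuid.New().String())
		_ = level.Debug(logger).Log("msg", "handling scrape")
		next(logger, w, r)
	}
}
```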

View File

@@ -3,6 +3,7 @@
package hyperv package hyperv
import ( import (
"errors"
"fmt" "fmt"
"strings" "strings"
@@ -10,8 +11,8 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "hyperv" const Name = "hyperv"
@@ -22,8 +23,8 @@ var ConfigDefaults = Config{}
// Collector is a Prometheus Collector for hyper-v. // Collector is a Prometheus Collector for hyper-v.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
// Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary // Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
healthCritical *prometheus.Desc healthCritical *prometheus.Desc
@@ -140,7 +141,7 @@ type Collector struct {
vmMemoryRemovedMemory *prometheus.Desc vmMemoryRemovedMemory *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -149,8 +150,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -162,11 +161,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -174,7 +169,13 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
buildSubsystemName := func(component string) string { return "hyperv_" + component } buildSubsystemName := func(component string) string { return "hyperv_" + component }
c.healthCritical = prometheus.NewDesc( c.healthCritical = prometheus.NewDesc(
@@ -754,64 +755,65 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collectVmHealth(ch); err != nil { if err := c.collectVmHealth(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV health status metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV health status metrics", "err", err)
return err return err
} }
if err := c.collectVmVid(ch); err != nil { if err := c.collectVmVid(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV pages metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV pages metrics", "err", err)
return err return err
} }
if err := c.collectVmHv(ch); err != nil { if err := c.collectVmHv(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV hv status metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV hv status metrics", "err", err)
return err return err
} }
if err := c.collectVmProcessor(ch); err != nil { if err := c.collectVmProcessor(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV processor metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV processor metrics", "err", err)
return err return err
} }
if err := c.collectHostLPUsage(ch); err != nil { if err := c.collectHostLPUsage(logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host logical processors metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV host logical processors metrics", "err", err)
return err return err
} }
if err := c.collectHostCpuUsage(ch); err != nil { if err := c.collectHostCpuUsage(logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host CPU metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV host CPU metrics", "err", err)
return err return err
} }
if err := c.collectVmCpuUsage(ch); err != nil { if err := c.collectVmCpuUsage(logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV VM CPU metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV VM CPU metrics", "err", err)
return err return err
} }
if err := c.collectVmSwitch(ch); err != nil { if err := c.collectVmSwitch(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV switch metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV switch metrics", "err", err)
return err return err
} }
if err := c.collectVmEthernet(ch); err != nil { if err := c.collectVmEthernet(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV ethernet metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV ethernet metrics", "err", err)
return err return err
} }
if err := c.collectVmStorage(ch); err != nil { if err := c.collectVmStorage(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual storage metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV virtual storage metrics", "err", err)
return err return err
} }
if err := c.collectVmNetwork(ch); err != nil { if err := c.collectVmNetwork(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual network metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV virtual network metrics", "err", err)
return err return err
} }
if err := c.collectVmMemory(ch); err != nil { if err := c.collectVmMemory(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual memory metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting hyperV virtual memory metrics", "err", err)
return err return err
} }
@@ -826,8 +828,7 @@ type Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
func (c *Collector) collectVmHealth(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmHealth(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary var dst []Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -858,8 +859,7 @@ type Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition struct {
func (c *Collector) collectVmVid(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmVid(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition var dst []Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -921,8 +921,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition struct {
func (c *Collector) collectVmHv(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmHv(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1058,8 +1057,7 @@ type Win32_PerfRawData_HvStats_HyperVHypervisor struct {
func (c *Collector) collectVmProcessor(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmProcessor(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisor var dst []Win32_PerfRawData_HvStats_HyperVHypervisor
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisor", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1088,10 +1086,9 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor struct {
PercentTotalRunTime uint PercentTotalRunTime uint
} }
func (c *Collector) collectHostLPUsage(ch chan<- prometheus.Metric) error { func (c *Collector) collectHostLPUsage(logger log.Logger, ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor var dst []Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisorLogicalProcessor", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1102,7 +1099,7 @@ func (c *Collector) collectHostLPUsage(ch chan<- prometheus.Metric) error {
// The name format is Hv LP <core id> // The name format is Hv LP <core id>
parts := strings.Split(obj.Name, " ") parts := strings.Split(obj.Name, " ")
if len(parts) != 3 { if len(parts) != 3 {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectHostLPUsage: %q", obj.Name)) _ = level.Warn(logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectHostLPUsage: %q", obj.Name))
continue continue
} }
coreId := parts[2] coreId := parts[2]
@@ -1142,10 +1139,9 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor struct {
CPUWaitTimePerDispatch uint64 CPUWaitTimePerDispatch uint64
} }
func (c *Collector) collectHostCpuUsage(ch chan<- prometheus.Metric) error { func (c *Collector) collectHostCpuUsage(logger log.Logger, ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor var dst []Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1156,7 +1152,7 @@ func (c *Collector) collectHostCpuUsage(ch chan<- prometheus.Metric) error {
// The name format is Root VP <core id> // The name format is Root VP <core id>
parts := strings.Split(obj.Name, " ") parts := strings.Split(obj.Name, " ")
if len(parts) != 3 { if len(parts) != 3 {
_ = level.Warn(c.logger).Log("msg", "Unexpected format of Name in collectHostCpuUsage: "+obj.Name) _ = level.Warn(logger).Log("msg", "Unexpected format of Name in collectHostCpuUsage: "+obj.Name)
continue continue
} }
coreId := parts[2] coreId := parts[2]
@@ -1210,10 +1206,9 @@ type Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor struct {
CPUWaitTimePerDispatch uint64 CPUWaitTimePerDispatch uint64
} }
func (c *Collector) collectVmCpuUsage(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmCpuUsage(logger log.Logger, ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor var dst []Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1224,12 +1219,12 @@ func (c *Collector) collectVmCpuUsage(ch chan<- prometheus.Metric) error {
// The name format is <VM Name>:Hv VP <vcore id> // The name format is <VM Name>:Hv VP <vcore id>
parts := strings.Split(obj.Name, ":") parts := strings.Split(obj.Name, ":")
if len(parts) != 2 { if len(parts) != 2 {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectVmCpuUsage: %q, expected %q. Skipping.", obj.Name, "<VM Name>:Hv VP <vcore id>")) _ = level.Warn(logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectVmCpuUsage: %q, expected %q. Skipping.", obj.Name, "<VM Name>:Hv VP <vcore id>"))
continue continue
} }
coreParts := strings.Split(parts[1], " ") coreParts := strings.Split(parts[1], " ")
if len(coreParts) != 3 { if len(coreParts) != 3 {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of core identifier in collectVmCpuUsage: %q, expected %q. Skipping.", parts[1], "Hv VP <vcore id>")) _ = level.Warn(logger).Log("msg", fmt.Sprintf("Unexpected format of core identifier in collectVmCpuUsage: %q, expected %q. Skipping.", parts[1], "Hv VP <vcore id>"))
continue continue
} }
vmName := parts[0] vmName := parts[0]
@@ -1305,8 +1300,7 @@ type Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch struct {
func (c *Collector) collectVmSwitch(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmSwitch(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch var dst []Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1470,8 +1464,7 @@ type Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter struct {
func (c *Collector) collectVmEthernet(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmEthernet(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter var dst []Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1539,8 +1532,7 @@ type Win32_PerfRawData_Counters_HyperVVirtualStorageDevice struct {
func (c *Collector) collectVmStorage(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmStorage(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_Counters_HyperVVirtualStorageDevice var dst []Win32_PerfRawData_Counters_HyperVVirtualStorageDevice
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_Counters_HyperVVirtualStorageDevice", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1608,8 +1600,7 @@ type Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter struct {
func (c *Collector) collectVmNetwork(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmNetwork(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter var dst []Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1681,8 +1672,7 @@ type Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM struct {
func (c *Collector) collectVmMemory(ch chan<- prometheus.Metric) error { func (c *Collector) collectVmMemory(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM var dst []Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_BalancerStats_HyperVDynamicMemoryVM", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
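Several hyperv helpers now take the logger explicitly because they warn about malformed performance-counter instance names before skipping them. A short sketch of the name parsing used for the `<VM Name>:Hv VP <vcore id>` instances, simplified from collectVmCpuUsage above:

```go
package hyperv

import (
	"fmt"
	"strings"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

// parseVPName splits a "<VM Name>:Hv VP <vcore id>" instance name into its
// VM name and core id, logging and skipping anything that does not match,
// as the reworked collectVmCpuUsage does.
func parseVPName(logger log.Logger, name string) (vmName, coreID string, ok bool) {
	parts := strings.Split(name, ":")
	if len(parts) != 2 {
		_ = level.Warn(logger).Log("msg", fmt.Sprintf("Unexpected format of Name: %q", name))
		return "", "", false
	}

	coreParts := strings.Split(parts[1], " ")
	if len(coreParts) != 3 {
		_ = level.Warn(logger).Log("msg", fmt.Sprintf("Unexpected format of core identifier: %q", parts[1]))
		return "", "", false
	}

	return parts[0], coreParts[2], true
}
```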

View File

@@ -14,6 +14,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows/registry" "golang.org/x/sys/windows/registry"
) )
@@ -35,7 +36,6 @@ var ConfigDefaults = Config{
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
info *prometheus.Desc info *prometheus.Desc
@@ -162,7 +162,7 @@ type Collector struct {
iisVersion simpleVersion iisVersion simpleVersion
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -187,8 +187,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -252,11 +250,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{ return []string{
"Web Service", "Web Service",
"APP_POOL_WAS", "APP_POOL_WAS",
@@ -269,8 +263,10 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, _ *wmi.Client) error {
c.iisVersion = getIISVersion(c.logger) logger = log.With(logger, "collector", Name)
c.iisVersion = getIISVersion(logger)
c.info = prometheus.NewDesc( c.info = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"), prometheus.BuildFQName(types.Namespace, Name, "info"),
@@ -932,24 +928,25 @@ func getIISVersion(logger log.Logger) simpleVersion {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collectWebService(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "err", err) if err := c.collectWebService(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting iis metrics", "err", err)
return err return err
} }
if err := c.collectAPP_POOL_WAS(ctx, ch); err != nil { if err := c.collectAPP_POOL_WAS(ctx, logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting iis metrics", "err", err)
return err return err
} }
if err := c.collectW3SVC_W3WP(ctx, ch); err != nil { if err := c.collectW3SVC_W3WP(ctx, logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting iis metrics", "err", err)
return err return err
} }
if err := c.collectWebServiceCache(ctx, ch); err != nil { if err := c.collectWebServiceCache(ctx, logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting iis metrics", "err", err)
return err return err
} }
@@ -1046,9 +1043,10 @@ func dedupIISNames[V hasGetIISName](services []V) map[string]V {
return webServiceDeDuplicated return webServiceDeDuplicated
} }
func (c *Collector) collectWebService(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectWebService(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var webService []perflibWebService var webService []perflibWebService
if err := perflib.UnmarshalObject(ctx.PerfObjects["Web Service"], &webService, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["Web Service"], &webService, logger); err != nil {
return err return err
} }
@@ -1338,9 +1336,10 @@ var applicationStates = map[uint32]string{
7: "Delete Pending", 7: "Delete Pending",
} }
func (c *Collector) collectAPP_POOL_WAS(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectAPP_POOL_WAS(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var APP_POOL_WAS []perflibAPP_POOL_WAS var APP_POOL_WAS []perflibAPP_POOL_WAS
if err := perflib.UnmarshalObject(ctx.PerfObjects["APP_POOL_WAS"], &APP_POOL_WAS, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["APP_POOL_WAS"], &APP_POOL_WAS, logger); err != nil {
return err return err
} }
@@ -1515,9 +1514,10 @@ type perflibW3SVC_W3WP_IIS8 struct {
WebSocketConnectionsRejected float64 `perflib:"WebSocket Connections Rejected / Sec"` WebSocketConnectionsRejected float64 `perflib:"WebSocket Connections Rejected / Sec"`
} }
func (c *Collector) collectW3SVC_W3WP(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectW3SVC_W3WP(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var W3SVC_W3WP []perflibW3SVC_W3WP var W3SVC_W3WP []perflibW3SVC_W3WP
if err := perflib.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP, logger); err != nil {
return err return err
} }
@@ -1775,7 +1775,7 @@ func (c *Collector) collectW3SVC_W3WP(ctx *types.ScrapeContext, ch chan<- promet
if c.iisVersion.major >= 8 { if c.iisVersion.major >= 8 {
var W3SVC_W3WP_IIS8 []perflibW3SVC_W3WP_IIS8 var W3SVC_W3WP_IIS8 []perflibW3SVC_W3WP_IIS8
if err := perflib.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP_IIS8, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["W3SVC_W3WP"], &W3SVC_W3WP_IIS8, logger); err != nil {
return err return err
} }
@@ -1912,9 +1912,10 @@ type perflibWebServiceCache struct {
ServiceCache_OutputCacheQueriesTotal float64 ServiceCache_OutputCacheQueriesTotal float64
} }
func (c *Collector) collectWebServiceCache(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectWebServiceCache(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var WebServiceCache []perflibWebServiceCache var WebServiceCache []perflibWebServiceCache
if err := perflib.UnmarshalObject(ctx.PerfObjects["Web Service Cache"], &WebServiceCache, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["Web Service Cache"], &WebServiceCache, logger); err != nil {
return err return err
} }
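In iis, `Build` now resolves the installed IIS version with the logger that is passed in, and the earlier hunks gate the IIS 8+ counters on that result. A tiny sketch of the gating; the registry lookup inside `getIISVersion` is not reproduced here:

```go
package iis

// simpleVersion mirrors the major/minor pair the collector stores after Build.
type simpleVersion struct {
	major uint64
	minor uint64
}

// supportsWebSocketCounters reproduces the version gate shown above: the
// WebSocket-related W3SVC_W3WP counters only exist from IIS 8 onwards, so
// older versions skip that second unmarshal pass.
func supportsWebSocketCounters(v simpleVersion) bool {
	return v.major >= 8
}
```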

View File

@@ -9,6 +9,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/headers/slc" "github.com/prometheus-community/windows_exporter/pkg/headers/slc"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "license" const Name = "license"
@@ -28,12 +29,11 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
licenseStatus *prometheus.Desc licenseStatus *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -42,8 +42,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -55,11 +53,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -67,7 +61,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.licenseStatus = prometheus.NewDesc( c.licenseStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "status"), prometheus.BuildFQName(types.Namespace, Name, "status"),
"Status of windows license", "Status of windows license",
@@ -80,9 +74,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting license metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting license metrics", "err", err)
return err return err
} }
return nil return nil

View File

@@ -16,6 +16,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
) )
@@ -34,7 +35,6 @@ var ConfigDefaults = Config{
// A Collector is a Prometheus Collector for perflib logicalDisk metrics. // A Collector is a Prometheus Collector for perflib logicalDisk metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
avgReadQueue *prometheus.Desc avgReadQueue *prometheus.Desc
avgWriteQueue *prometheus.Desc avgWriteQueue *prometheus.Desc
@@ -64,7 +64,7 @@ type volumeInfo struct {
readonly float64 readonly float64
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -81,8 +81,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -126,11 +124,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"LogicalDisk"}, nil return []string{"LogicalDisk"}, nil
} }
@@ -138,7 +132,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.information = prometheus.NewDesc( c.information = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"), prometheus.BuildFQName(types.Namespace, Name, "info"),
"A metric with a constant '1' value labeled with logical disk information", "A metric with a constant '1' value labeled with logical disk information",
@@ -268,9 +262,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting logical_disk metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting logical_disk metrics", "err", err)
return err return err
} }
return nil return nil
@@ -299,7 +294,8 @@ type logicalDisk struct {
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"` AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var ( var (
err error err error
diskID string diskID string
@@ -307,7 +303,7 @@ func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
dst []logicalDisk dst []logicalDisk
) )
if err = perflib.UnmarshalObject(ctx.PerfObjects["LogicalDisk"], &dst, c.logger); err != nil { if err = perflib.UnmarshalObject(ctx.PerfObjects["LogicalDisk"], &dst, logger); err != nil {
return err return err
} }
@@ -320,12 +316,12 @@ func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
diskID, err = getDiskIDByVolume(volume.Name) diskID, err = getDiskIDByVolume(volume.Name)
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", "failed to get disk ID for "+volume.Name, "err", err) _ = level.Warn(logger).Log("msg", "failed to get disk ID for "+volume.Name, "err", err)
} }
info, err = getVolumeInfo(volume.Name) info, err = getVolumeInfo(volume.Name)
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", "failed to get volume information for %s"+volume.Name, "err", err) _ = level.Warn(logger).Log("msg", "failed to get volume information for %s"+volume.Name, "err", err)
} }
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(


@@ -9,8 +9,8 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "logon" const Name = "logon"
@@ -21,13 +21,13 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI metrics. // A Collector is a Prometheus Collector for WMI metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
logonType *prometheus.Desc logonType *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -36,8 +36,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -49,11 +47,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -61,7 +55,12 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.logonType = prometheus.NewDesc( c.logonType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "logon_type"), prometheus.BuildFQName(types.Namespace, Name, "logon_type"),
"Number of active logon sessions (LogonSession.LogonType)", "Number of active logon sessions (LogonSession.LogonType)",
@@ -73,9 +72,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting user metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting user metrics", "err", err)
return err return err
} }
return nil return nil
@@ -89,8 +89,7 @@ type Win32_LogonSession struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_LogonSession var dst []Win32_LogonSession
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_LogonSession", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {


@@ -1,6 +1,7 @@
package collector package collector
import ( import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/collector/ad" "github.com/prometheus-community/windows_exporter/pkg/collector/ad"
"github.com/prometheus-community/windows_exporter/pkg/collector/adcs" "github.com/prometheus-community/windows_exporter/pkg/collector/adcs"
"github.com/prometheus-community/windows_exporter/pkg/collector/adfs" "github.com/prometheus-community/windows_exporter/pkg/collector/adfs"
@@ -21,11 +22,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk" "github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
"github.com/prometheus-community/windows_exporter/pkg/collector/logon" "github.com/prometheus-community/windows_exporter/pkg/collector/logon"
"github.com/prometheus-community/windows_exporter/pkg/collector/memory" "github.com/prometheus-community/windows_exporter/pkg/collector/memory"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_cluster" "github.com/prometheus-community/windows_exporter/pkg/collector/mscluster"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_network"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_node"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_resource"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_resourcegroup"
"github.com/prometheus-community/windows_exporter/pkg/collector/msmq" "github.com/prometheus-community/windows_exporter/pkg/collector/msmq"
"github.com/prometheus-community/windows_exporter/pkg/collector/mssql" "github.com/prometheus-community/windows_exporter/pkg/collector/mssql"
"github.com/prometheus-community/windows_exporter/pkg/collector/net" "github.com/prometheus-community/windows_exporter/pkg/collector/net"
@@ -60,6 +57,12 @@ import (
"golang.org/x/exp/maps" "golang.org/x/exp/maps"
) )
func NewBuilderWithFlags[C Collector](fn BuilderWithFlags[C]) BuilderWithFlags[Collector] {
return func(app *kingpin.Application) Collector {
return fn(app)
}
}
var BuildersWithFlags = map[string]BuilderWithFlags[Collector]{ var BuildersWithFlags = map[string]BuilderWithFlags[Collector]{
ad.Name: NewBuilderWithFlags(ad.NewWithFlags), ad.Name: NewBuilderWithFlags(ad.NewWithFlags),
adcs.Name: NewBuilderWithFlags(adcs.NewWithFlags), adcs.Name: NewBuilderWithFlags(adcs.NewWithFlags),
@@ -81,11 +84,7 @@ var BuildersWithFlags = map[string]BuilderWithFlags[Collector]{
logical_disk.Name: NewBuilderWithFlags(logical_disk.NewWithFlags), logical_disk.Name: NewBuilderWithFlags(logical_disk.NewWithFlags),
logon.Name: NewBuilderWithFlags(logon.NewWithFlags), logon.Name: NewBuilderWithFlags(logon.NewWithFlags),
memory.Name: NewBuilderWithFlags(memory.NewWithFlags), memory.Name: NewBuilderWithFlags(memory.NewWithFlags),
mscluster_cluster.Name: NewBuilderWithFlags(mscluster_cluster.NewWithFlags), mscluster.Name: NewBuilderWithFlags(mscluster.NewWithFlags),
mscluster_network.Name: NewBuilderWithFlags(mscluster_network.NewWithFlags),
mscluster_node.Name: NewBuilderWithFlags(mscluster_node.NewWithFlags),
mscluster_resource.Name: NewBuilderWithFlags(mscluster_resource.NewWithFlags),
mscluster_resourcegroup.Name: NewBuilderWithFlags(mscluster_resourcegroup.NewWithFlags),
msmq.Name: NewBuilderWithFlags(msmq.NewWithFlags), msmq.Name: NewBuilderWithFlags(msmq.NewWithFlags),
mssql.Name: NewBuilderWithFlags(mssql.NewWithFlags), mssql.Name: NewBuilderWithFlags(mssql.NewWithFlags),
net.Name: NewBuilderWithFlags(net.NewWithFlags), net.Name: NewBuilderWithFlags(net.NewWithFlags),
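NewBuilderWithFlags is added here because a constructor that returns a concrete type such as *logon.Collector does not satisfy a function type returning the Collector interface; the generic wrapper performs that conversion so every concrete constructor fits the one map type. A sketch under the assumption that BuilderWithFlags is declared roughly as below (its declaration sits outside the hunks shown here):

```go
package collector

import (
	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/collector/logon"
)

// BuilderWithFlags is the assumed shape of a builder: it registers the
// collector's CLI flags on the kingpin application and returns the collector.
// Collector is this package's collector interface.
type BuilderWithFlags[C Collector] func(*kingpin.Application) C

// NewBuilderWithFlags (verbatim from this change) lifts a builder returning a
// concrete collector type into a builder returning the Collector interface,
// a conversion Go does not perform implicitly for function values.
func NewBuilderWithFlags[C Collector](fn BuilderWithFlags[C]) BuilderWithFlags[Collector] {
	return func(app *kingpin.Application) Collector {
		return fn(app)
	}
}

// Example registration, matching the map entries above.
var exampleBuilders = map[string]BuilderWithFlags[Collector]{
	logon.Name: NewBuilderWithFlags(logon.NewWithFlags),
}
```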


@@ -12,6 +12,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "memory" const Name = "memory"
@@ -23,7 +24,6 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for perflib Memory metrics. // A Collector is a Prometheus Collector for perflib Memory metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
availableBytes *prometheus.Desc availableBytes *prometheus.Desc
cacheBytes *prometheus.Desc cacheBytes *prometheus.Desc
@@ -59,7 +59,7 @@ type Collector struct {
writeCopiesTotal *prometheus.Desc writeCopiesTotal *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -68,8 +68,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -81,11 +79,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil return []string{"Memory"}, nil
} }
@@ -93,7 +87,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.availableBytes = prometheus.NewDesc( c.availableBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "available_bytes"), prometheus.BuildFQName(types.Namespace, Name, "available_bytes"),
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+ "The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
@@ -303,9 +297,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting memory metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting memory metrics", "err", err)
return err return err
} }
return nil return nil
@@ -348,9 +343,10 @@ type memory struct {
WriteCopiesPersec float64 `perflib:"Write Copies/sec"` WriteCopiesPersec float64 `perflib:"Write Copies/sec"`
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []memory var dst []memory
if err := perflib.UnmarshalObject(ctx.PerfObjects["Memory"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["Memory"], &dst, logger); err != nil {
return err return err
} }


@@ -0,0 +1,306 @@
package mscluster
import (
"errors"
"fmt"
"slices"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
)
const Name = "mscluster"
type Config struct {
CollectorsEnabled []string `yaml:"collectors_enabled"`
}
var ConfigDefaults = Config{
CollectorsEnabled: []string{
"cluster",
"network",
"node",
"resource",
"resourcegroup",
},
}
// A Collector is a Prometheus Collector for WMI MSCluster_Cluster metrics.
type Collector struct {
config Config
wmiClient *wmi.Client
// cluster
clusterAddEvictDelay *prometheus.Desc
clusterAdminAccessPoint *prometheus.Desc
clusterAutoAssignNodeSite *prometheus.Desc
clusterAutoBalancerLevel *prometheus.Desc
clusterAutoBalancerMode *prometheus.Desc
clusterBackupInProgress *prometheus.Desc
clusterBlockCacheSize *prometheus.Desc
clusterClusSvcHangTimeout *prometheus.Desc
clusterClusSvcRegroupOpeningTimeout *prometheus.Desc
clusterClusSvcRegroupPruningTimeout *prometheus.Desc
clusterClusSvcRegroupStageTimeout *prometheus.Desc
clusterClusSvcRegroupTickInMilliseconds *prometheus.Desc
clusterClusterEnforcedAntiAffinity *prometheus.Desc
clusterClusterFunctionalLevel *prometheus.Desc
clusterClusterGroupWaitDelay *prometheus.Desc
clusterClusterLogLevel *prometheus.Desc
clusterClusterLogSize *prometheus.Desc
clusterClusterUpgradeVersion *prometheus.Desc
clusterCrossSiteDelay *prometheus.Desc
clusterCrossSiteThreshold *prometheus.Desc
clusterCrossSubnetDelay *prometheus.Desc
clusterCrossSubnetThreshold *prometheus.Desc
clusterCsvBalancer *prometheus.Desc
clusterDatabaseReadWriteMode *prometheus.Desc
clusterDefaultNetworkRole *prometheus.Desc
clusterDetectedCloudPlatform *prometheus.Desc
clusterDetectManagedEvents *prometheus.Desc
clusterDetectManagedEventsThreshold *prometheus.Desc
clusterDisableGroupPreferredOwnerRandomization *prometheus.Desc
clusterDrainOnShutdown *prometheus.Desc
clusterDynamicQuorumEnabled *prometheus.Desc
clusterEnableSharedVolumes *prometheus.Desc
clusterFixQuorum *prometheus.Desc
clusterGracePeriodEnabled *prometheus.Desc
clusterGracePeriodTimeout *prometheus.Desc
clusterGroupDependencyTimeout *prometheus.Desc
clusterHangRecoveryAction *prometheus.Desc
clusterIgnorePersistentStateOnStartup *prometheus.Desc
clusterLogResourceControls *prometheus.Desc
clusterLowerQuorumPriorityNodeId *prometheus.Desc
clusterMaxNumberOfNodes *prometheus.Desc
clusterMessageBufferLength *prometheus.Desc
clusterMinimumNeverPreemptPriority *prometheus.Desc
clusterMinimumPreemptorPriority *prometheus.Desc
clusterNetftIPSecEnabled *prometheus.Desc
clusterPlacementOptions *prometheus.Desc
clusterPlumbAllCrossSubnetRoutes *prometheus.Desc
clusterPreventQuorum *prometheus.Desc
clusterQuarantineDuration *prometheus.Desc
clusterQuarantineThreshold *prometheus.Desc
clusterQuorumArbitrationTimeMax *prometheus.Desc
clusterQuorumArbitrationTimeMin *prometheus.Desc
clusterQuorumLogFileSize *prometheus.Desc
clusterQuorumTypeValue *prometheus.Desc
clusterRequestReplyTimeout *prometheus.Desc
clusterResiliencyDefaultPeriod *prometheus.Desc
clusterResiliencyLevel *prometheus.Desc
clusterResourceDllDeadlockPeriod *prometheus.Desc
clusterRootMemoryReserved *prometheus.Desc
clusterRouteHistoryLength *prometheus.Desc
clusterS2DBusTypes *prometheus.Desc
clusterS2DCacheDesiredState *prometheus.Desc
clusterS2DCacheFlashReservePercent *prometheus.Desc
clusterS2DCachePageSizeKBytes *prometheus.Desc
clusterS2DEnabled *prometheus.Desc
clusterS2DIOLatencyThreshold *prometheus.Desc
clusterS2DOptimizations *prometheus.Desc
clusterSameSubnetDelay *prometheus.Desc
clusterSameSubnetThreshold *prometheus.Desc
clusterSecurityLevel *prometheus.Desc
clusterSecurityLevelForStorage *prometheus.Desc
clusterSharedVolumeVssWriterOperationTimeout *prometheus.Desc
clusterShutdownTimeoutInMinutes *prometheus.Desc
clusterUseClientAccessNetworksForSharedVolumes *prometheus.Desc
clusterWitnessDatabaseWriteTimeout *prometheus.Desc
clusterWitnessDynamicWeight *prometheus.Desc
clusterWitnessRestartInterval *prometheus.Desc
// network
networkCharacteristics *prometheus.Desc
networkFlags *prometheus.Desc
networkMetric *prometheus.Desc
networkRole *prometheus.Desc
networkState *prometheus.Desc
// node
nodeBuildNumber *prometheus.Desc
nodeCharacteristics *prometheus.Desc
nodeDetectedCloudPlatform *prometheus.Desc
nodeDynamicWeight *prometheus.Desc
nodeFlags *prometheus.Desc
nodeMajorVersion *prometheus.Desc
nodeMinorVersion *prometheus.Desc
nodeNeedsPreventQuorum *prometheus.Desc
nodeNodeDrainStatus *prometheus.Desc
nodeNodeHighestVersion *prometheus.Desc
nodeNodeLowestVersion *prometheus.Desc
nodeNodeWeight *prometheus.Desc
nodeState *prometheus.Desc
nodeStatusInformation *prometheus.Desc
resourceCharacteristics *prometheus.Desc
resourceDeadlockTimeout *prometheus.Desc
resourceEmbeddedFailureAction *prometheus.Desc
resourceFlags *prometheus.Desc
resourceIsAlivePollInterval *prometheus.Desc
resourceLooksAlivePollInterval *prometheus.Desc
resourceMonitorProcessId *prometheus.Desc
resourceOwnerNode *prometheus.Desc
resourcePendingTimeout *prometheus.Desc
resourceResourceClass *prometheus.Desc
resourceRestartAction *prometheus.Desc
resourceRestartDelay *prometheus.Desc
resourceRestartPeriod *prometheus.Desc
resourceRestartThreshold *prometheus.Desc
resourceRetryPeriodOnFailure *prometheus.Desc
resourceState *prometheus.Desc
resourceSubClass *prometheus.Desc
// ResourceGroup
resourceGroupAutoFailbackType *prometheus.Desc
resourceGroupCharacteristics *prometheus.Desc
resourceGroupColdStartSetting *prometheus.Desc
resourceGroupDefaultOwner *prometheus.Desc
resourceGroupFailbackWindowEnd *prometheus.Desc
resourceGroupFailbackWindowStart *prometheus.Desc
resourceGroupFailOverPeriod *prometheus.Desc
resourceGroupFailOverThreshold *prometheus.Desc
resourceGroupFlags *prometheus.Desc
resourceGroupGroupType *prometheus.Desc
resourceGroupOwnerNode *prometheus.Desc
resourceGroupPriority *prometheus.Desc
resourceGroupResiliencyPeriod *prometheus.Desc
resourceGroupState *prometheus.Desc
}
func New(config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
if config.CollectorsEnabled == nil {
config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
}
c := &Collector{
config: *config,
}
return c
}
func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
c.config.CollectorsEnabled = make([]string, 0)
var collectorsEnabled string
app.Flag(
"collectors.mscluster.enabled",
"Comma-separated list of collectors to use.",
).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
app.Action(func(*kingpin.ParseContext) error {
c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")
return nil
})
return c
}
func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
return []string{"Memory"}, nil
}
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if len(c.config.CollectorsEnabled) == 0 {
return nil
}
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
if slices.Contains(c.config.CollectorsEnabled, "cluster") {
c.buildCluster()
}
if slices.Contains(c.config.CollectorsEnabled, "network") {
c.buildNetwork()
}
if slices.Contains(c.config.CollectorsEnabled, "node") {
c.buildNode()
}
if slices.Contains(c.config.CollectorsEnabled, "resource") {
c.buildResource()
}
if slices.Contains(c.config.CollectorsEnabled, "resourcegroup") {
c.buildResourceGroup()
}
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, _ log.Logger, ch chan<- prometheus.Metric) error {
if len(c.config.CollectorsEnabled) == 0 {
return nil
}
var (
err error
errs []error
nodeNames []string
)
if slices.Contains(c.config.CollectorsEnabled, "cluster") {
if err = c.collectCluster(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect cluster metrics: %w", err))
}
}
if slices.Contains(c.config.CollectorsEnabled, "network") {
if err = c.collectNetwork(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect network metrics: %w", err))
}
}
if slices.Contains(c.config.CollectorsEnabled, "node") {
if nodeNames, err = c.collectNode(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect node metrics: %w", err))
}
}
if slices.Contains(c.config.CollectorsEnabled, "resource") {
if err = c.collectResource(ch, nodeNames); err != nil {
errs = append(errs, fmt.Errorf("failed to collect resource metrics: %w", err))
}
}
if slices.Contains(c.config.CollectorsEnabled, "resourcegroup") {
if err = c.collectResourceGroup(ch, nodeNames); err != nil {
errs = append(errs, fmt.Errorf("failed to collect resource group metrics: %w", err))
}
}
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
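With the five mscluster_* collectors folded into one, the cluster metrics are now enabled as a single collector and optionally narrowed through the new --collectors.mscluster.enabled flag; the old per-collector registrations (mscluster_cluster, mscluster_network, and so on) are removed from the builder map earlier in this diff. A hypothetical invocation, assuming the exporter's existing top-level --collectors.enabled flag and the usual binary name:

```
windows_exporter.exe --collectors.enabled="cpu,logical_disk,mscluster" --collectors.mscluster.enabled="cluster,node"
```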


@@ -0,0 +1,102 @@
package mscluster
import (
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
)
const nameNetwork = Name + "_network"
// msClusterNetwork represents the MSCluster_Network WMI class
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-network
type msClusterNetwork struct {
Name string
Characteristics uint
Flags uint
Metric uint
Role uint
State uint
}
func (c *Collector) buildNetwork() {
c.networkCharacteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNetwork, "characteristics"),
"Provides the characteristics of the network.",
[]string{"name"},
nil,
)
c.networkFlags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNetwork, "flags"),
"Provides access to the flags set for the node. ",
[]string{"name"},
nil,
)
c.networkMetric = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNetwork, "metric"),
"The metric of a cluster network (networks with lower values are used first). If this value is set, then the AutoMetric property is set to false.",
[]string{"name"},
nil,
)
c.networkRole = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNetwork, "role"),
"Provides access to the network's Role property. The Role property describes the role of the network in the cluster. 0: None; 1: Cluster; 2: Client; 3: Both ",
[]string{"name"},
nil,
)
c.networkState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNetwork, "state"),
"Provides the current state of the network. 1-1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up",
[]string{"name"},
nil,
)
}
// Collect sends the metric values for each metric
// to the provided prometheus metric channel.
func (c *Collector) collectNetwork(ch chan<- prometheus.Metric) error {
var dst []msClusterNetwork
if err := c.wmiClient.Query("SELECT * FROM MSCluster_Network", &dst, nil, "root/MSCluster"); err != nil {
return err
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.networkCharacteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.networkFlags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.networkMetric,
prometheus.GaugeValue,
float64(v.Metric),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.networkRole,
prometheus.GaugeValue,
float64(v.Role),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.networkState,
prometheus.GaugeValue,
float64(v.State),
v.Name,
)
}
return nil
}
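The extra arguments passed to Query here are the optional connect parameters of the underlying github.com/yusufpapurcu/wmi client: per that package's Query API they are handed to the WMI locator's ConnectServer call, so nil keeps the default (local) server and "root/MSCluster" selects the failover-cluster namespace rather than the default root\cimv2. A focused sketch of that call shape (the helper name is illustrative):

```go
package mscluster

import "github.com/yusufpapurcu/wmi"

// queryClusterNetworks shows the namespace-qualified query form used in this
// change: after the destination slice, the variadic arguments are server
// (nil = local machine) and WMI namespace.
func queryClusterNetworks(client *wmi.Client) ([]msClusterNetwork, error) {
	var dst []msClusterNetwork
	if err := client.Query("SELECT * FROM MSCluster_Network", &dst, nil, "root/MSCluster"); err != nil {
		return nil, err
	}
	return dst, nil
}
```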


@@ -1,168 +1,15 @@
package mscluster_node package mscluster
import ( import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
const Name = "mscluster_node" const nameNode = Name + "_node"
type Config struct{} // msClusterNode represents the MSCluster_Node WMI class
var ConfigDefaults = Config{}
// Variable used by mscluster_resource and mscluster_resourcegroup.
var NodeName []string
// A Collector is a Prometheus Collector for WMI MSCluster_Node metrics.
type Collector struct {
config Config
logger log.Logger
buildNumber *prometheus.Desc
characteristics *prometheus.Desc
detectedCloudPlatform *prometheus.Desc
dynamicWeight *prometheus.Desc
flags *prometheus.Desc
majorVersion *prometheus.Desc
minorVersion *prometheus.Desc
needsPreventQuorum *prometheus.Desc
nodeDrainStatus *prometheus.Desc
nodeHighestVersion *prometheus.Desc
nodeLowestVersion *prometheus.Desc
nodeWeight *prometheus.Desc
state *prometheus.Desc
statusInformation *prometheus.Desc
}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *Collector) GetName() string {
return Name
}
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil
}
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.buildNumber = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "build_number"),
"Provides access to the node's BuildNumber property.",
[]string{"name"},
nil,
)
c.characteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "characteristics"),
"Provides access to the characteristics set for the node.",
[]string{"name"},
nil,
)
c.detectedCloudPlatform = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "detected_cloud_platform"),
"(DetectedCloudPlatform)",
[]string{"name"},
nil,
)
c.dynamicWeight = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dynamic_weight"),
"The dynamic vote weight of the node adjusted by dynamic quorum feature.",
[]string{"name"},
nil,
)
c.flags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "flags"),
"Provides access to the flags set for the node.",
[]string{"name"},
nil,
)
c.majorVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "major_version"),
"Provides access to the node's MajorVersion property, which specifies the major portion of the Windows version installed.",
[]string{"name"},
nil,
)
c.minorVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "minor_version"),
"Provides access to the node's MinorVersion property, which specifies the minor portion of the Windows version installed.",
[]string{"name"},
nil,
)
c.needsPreventQuorum = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "needs_prevent_quorum"),
"Whether the cluster service on that node should be started with prevent quorum flag.",
[]string{"name"},
nil,
)
c.nodeDrainStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "node_drain_status"),
"The current node drain status of a node. 0: Not Initiated; 1: In Progress; 2: Completed; 3: Failed",
[]string{"name"},
nil,
)
c.nodeHighestVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "node_highest_version"),
"Provides access to the node's NodeHighestVersion property, which specifies the highest possible version of the cluster service with which the node can join or communicate.",
[]string{"name"},
nil,
)
c.nodeLowestVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "node_lowest_version"),
"Provides access to the node's NodeLowestVersion property, which specifies the lowest possible version of the cluster service with which the node can join or communicate.",
[]string{"name"},
nil,
)
c.nodeWeight = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "node_weight"),
"The vote weight of the node.",
[]string{"name"},
nil,
)
c.state = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"Returns the current state of a node. -1: Unknown; 0: Up; 1: Down; 2: Paused; 3: Joining",
[]string{"name"},
nil,
)
c.statusInformation = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "status_information"),
"The isolation or quarantine status of the node.",
[]string{"name"},
nil,
)
return nil
}
// MSCluster_Node docs:
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-node // - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-node
type MSCluster_Node struct { type msClusterNode struct {
Name string Name string
BuildNumber uint BuildNumber uint
@@ -181,118 +28,205 @@ type MSCluster_Node struct {
StatusInformation uint StatusInformation uint
} }
func (c *Collector) buildNode() {
c.nodeBuildNumber = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "build_number"),
"Provides access to the node's BuildNumber property.",
[]string{"name"},
nil,
)
c.nodeCharacteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "characteristics"),
"Provides access to the characteristics set for the node.",
[]string{"name"},
nil,
)
c.nodeDetectedCloudPlatform = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "detected_cloud_platform"),
"(DetectedCloudPlatform)",
[]string{"name"},
nil,
)
c.nodeDynamicWeight = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "dynamic_weight"),
"The dynamic vote weight of the node adjusted by dynamic quorum feature.",
[]string{"name"},
nil,
)
c.nodeFlags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "flags"),
"Provides access to the flags set for the node.",
[]string{"name"},
nil,
)
c.nodeMajorVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "major_version"),
"Provides access to the node's MajorVersion property, which specifies the major portion of the Windows version installed.",
[]string{"name"},
nil,
)
c.nodeMinorVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "minor_version"),
"Provides access to the node's MinorVersion property, which specifies the minor portion of the Windows version installed.",
[]string{"name"},
nil,
)
c.nodeNeedsPreventQuorum = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "needs_prevent_quorum"),
"Whether the cluster service on that node should be started with prevent quorum flag.",
[]string{"name"},
nil,
)
c.nodeNodeDrainStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "node_drain_status"),
"The current node drain status of a node. 0: Not Initiated; 1: In Progress; 2: Completed; 3: Failed",
[]string{"name"},
nil,
)
c.nodeNodeHighestVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "node_highest_version"),
"Provides access to the node's NodeHighestVersion property, which specifies the highest possible version of the cluster service with which the node can join or communicate.",
[]string{"name"},
nil,
)
c.nodeNodeLowestVersion = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "node_lowest_version"),
"Provides access to the node's NodeLowestVersion property, which specifies the lowest possible version of the cluster service with which the node can join or communicate.",
[]string{"name"},
nil,
)
c.nodeNodeWeight = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "node_weight"),
"The vote weight of the node.",
[]string{"name"},
nil,
)
c.nodeState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "state"),
"Returns the current state of a node. -1: Unknown; 0: Up; 1: Down; 2: Paused; 3: Joining",
[]string{"name"},
nil,
)
c.nodeStatusInformation = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameNode, "status_information"),
"The isolation or quarantine status of the node.",
[]string{"name"},
nil,
)
}
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectNode(ch chan<- prometheus.Metric) ([]string, error) {
var dst []MSCluster_Node var dst []msClusterNode
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil { if err := c.wmiClient.Query("SELECT * FROM MSCluster_Node", &dst, nil, "root/MSCluster"); err != nil {
return err return nil, err
} }
NodeName = []string{} nodeNames := make([]string, 0, len(dst))
for _, v := range dst { for _, v := range dst {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.buildNumber, c.nodeBuildNumber,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.BuildNumber), float64(v.BuildNumber),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.characteristics, c.nodeCharacteristics,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.Characteristics), float64(v.Characteristics),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.detectedCloudPlatform, c.nodeDetectedCloudPlatform,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.DetectedCloudPlatform), float64(v.DetectedCloudPlatform),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.dynamicWeight, c.nodeDynamicWeight,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.DynamicWeight), float64(v.DynamicWeight),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.flags, c.nodeFlags,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.Flags), float64(v.Flags),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.majorVersion, c.nodeMajorVersion,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.MajorVersion), float64(v.MajorVersion),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.minorVersion, c.nodeMinorVersion,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.MinorVersion), float64(v.MinorVersion),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.needsPreventQuorum, c.nodeNeedsPreventQuorum,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.NeedsPreventQuorum), float64(v.NeedsPreventQuorum),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.nodeDrainStatus, c.nodeNodeDrainStatus,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.NodeDrainStatus), float64(v.NodeDrainStatus),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.nodeHighestVersion, c.nodeNodeHighestVersion,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.NodeHighestVersion), float64(v.NodeHighestVersion),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.nodeLowestVersion, c.nodeNodeLowestVersion,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.NodeLowestVersion), float64(v.NodeLowestVersion),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.nodeWeight, c.nodeNodeWeight,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.NodeWeight), float64(v.NodeWeight),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.state, c.nodeState,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.State), float64(v.State),
v.Name, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.statusInformation, c.nodeStatusInformation,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.StatusInformation), float64(v.StatusInformation),
v.Name, v.Name,
) )
NodeName = append(NodeName, v.Name) nodeNames = append(nodeNames, v.Name)
} }
return nil return nodeNames, nil
} }
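The exported package-level NodeName slice is gone: previously it was written by this collector and read by the separate mscluster_resource and mscluster_resourcegroup collectors, so the results depended on collection order across packages. After the merge, collectNode returns the names and the single Collect method passes them on explicitly, roughly as in this sketch (the helper name is illustrative and error aggregation is simplified):

```go
package mscluster

import "github.com/prometheus/client_golang/prometheus"

// collectAll sketches the data flow that replaces the removed NodeName
// global: node names gathered in one query are reused by the dependent
// sub-collectors instead of being shared through exported package state.
func (c *Collector) collectAll(ch chan<- prometheus.Metric) error {
	nodeNames, err := c.collectNode(ch)
	if err != nil {
		return err
	}
	if err := c.collectResource(ch, nodeNames); err != nil {
		return err
	}
	return c.collectResourceGroup(ch, nodeNames)
}
```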


@@ -1,193 +1,15 @@
package mscluster_resource package mscluster
import ( import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_node"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
const Name = "mscluster_resource" const nameResource = Name + "_resource"
type Config struct{} // msClusterResource represents the MSCluster_Resource WMI class
var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI MSCluster_Resource metrics.
type Collector struct {
config Config
logger log.Logger
characteristics *prometheus.Desc
deadlockTimeout *prometheus.Desc
embeddedFailureAction *prometheus.Desc
flags *prometheus.Desc
isAlivePollInterval *prometheus.Desc
looksAlivePollInterval *prometheus.Desc
monitorProcessId *prometheus.Desc
ownerNode *prometheus.Desc
pendingTimeout *prometheus.Desc
resourceClass *prometheus.Desc
restartAction *prometheus.Desc
restartDelay *prometheus.Desc
restartPeriod *prometheus.Desc
restartThreshold *prometheus.Desc
retryPeriodOnFailure *prometheus.Desc
state *prometheus.Desc
subclass *prometheus.Desc
}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *Collector) GetName() string {
return Name
}
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil
}
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.characteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "characteristics"),
"Provides the characteristics of the object.",
[]string{"type", "owner_group", "name"},
nil,
)
c.deadlockTimeout = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "deadlock_timeout"),
"Indicates the length of time to wait, in milliseconds, before declaring a deadlock in any call into a resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.embeddedFailureAction = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "embedded_failure_action"),
"The time, in milliseconds, that a resource should remain in a failed state before the Cluster service attempts to restart it.",
[]string{"type", "owner_group", "name"},
nil,
)
c.flags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "flags"),
"Provides access to the flags set for the object.",
[]string{"type", "owner_group", "name"},
nil,
)
c.isAlivePollInterval = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "is_alive_poll_interval"),
"Provides access to the resource's IsAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it is operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the IsAlivePollInterval property for the resource type associated with the resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.looksAlivePollInterval = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "looks_alive_poll_interval"),
"Provides access to the resource's LooksAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it appears operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the LooksAlivePollInterval property for the resource type associated with the resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.monitorProcessId = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "monitor_process_id"),
"Provides the process ID of the resource host service that is currently hosting the resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.ownerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "owner_node"),
"The node hosting the resource. 0: Not hosted; 1: Hosted",
[]string{"type", "owner_group", "node_name", "name"},
nil,
)
c.ownerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "owner_node"),
"The node hosting the resource. 0: Not hosted; 1: Hosted",
[]string{"type", "owner_group", "node_name", "name"},
nil,
)
c.pendingTimeout = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pending_timeout"),
"Provides access to the resource's PendingTimeout property. If a resource cannot be brought online or taken offline in the number of milliseconds specified by the PendingTimeout property, the resource is forcibly terminated.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceClass = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "resource_class"),
"Gets or sets the resource class of a resource. 0: Unknown; 1: Storage; 2: Network; 32768: Unknown ",
[]string{"type", "owner_group", "name"},
nil,
)
c.restartAction = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "restart_action"),
"Provides access to the resource's RestartAction property, which is the action to be taken by the Cluster Service if the resource fails.",
[]string{"type", "owner_group", "name"},
nil,
)
c.restartDelay = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "restart_delay"),
"Indicates the time delay before a failed resource is restarted.",
[]string{"type", "owner_group", "name"},
nil,
)
c.restartPeriod = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "restart_period"),
"Provides access to the resource's RestartPeriod property, which is interval of time, in milliseconds, during which a specified number of restart attempts can be made on a nonresponsive resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.restartThreshold = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "restart_threshold"),
"Provides access to the resource's RestartThreshold property which is the maximum number of restart attempts that can be made on a resource within an interval defined by the RestartPeriod property before the Cluster Service initiates the action specified by the RestartAction property.",
[]string{"type", "owner_group", "name"},
nil,
)
c.retryPeriodOnFailure = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "retry_period_on_failure"),
"Provides access to the resource's RetryPeriodOnFailure property, which is the interval of time (in milliseconds) that a resource should remain in a failed state before the Cluster service attempts to restart it.",
[]string{"type", "owner_group", "name"},
nil,
)
c.state = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"The current state of the resource. -1: Unknown; 0: Inherited; 1: Initializing; 2: Online; 3: Offline; 4: Failed; 128: Pending; 129: Online Pending; 130: Offline Pending ",
[]string{"type", "owner_group", "name"},
nil,
)
c.subclass = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "subclass"),
"Provides the list of references to nodes that can be the owner of this resource.",
[]string{"type", "owner_group", "name"},
nil,
)
return nil
}
// MSCluster_Resource docs:
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resource // - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resource
type MSCluster_Resource struct { type msClusterResource struct {
Name string Name string
Type string Type string
OwnerGroup string OwnerGroup string
@@ -211,138 +33,247 @@ type MSCluster_Resource struct {
Subclass uint Subclass uint
} }
func (c *Collector) buildResource() {
c.resourceCharacteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "characteristics"),
"Provides the characteristics of the object.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceDeadlockTimeout = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "deadlock_timeout"),
"Indicates the length of time to wait, in milliseconds, before declaring a deadlock in any call into a resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceEmbeddedFailureAction = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "embedded_failure_action"),
"The time, in milliseconds, that a resource should remain in a failed state before the Cluster service attempts to restart it.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceFlags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "flags"),
"Provides access to the flags set for the object.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceIsAlivePollInterval = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "is_alive_poll_interval"),
"Provides access to the resource's IsAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it is operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the IsAlivePollInterval property for the resource type associated with the resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceLooksAlivePollInterval = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "looks_alive_poll_interval"),
"Provides access to the resource's LooksAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it appears operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the LooksAlivePollInterval property for the resource type associated with the resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceMonitorProcessId = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "monitor_process_id"),
"Provides the process ID of the resource host service that is currently hosting the resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceOwnerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "owner_node"),
"The node hosting the resource. 0: Not hosted; 1: Hosted",
[]string{"type", "owner_group", "node_name", "name"},
nil,
)
c.resourceOwnerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "owner_node"),
"The node hosting the resource. 0: Not hosted; 1: Hosted",
[]string{"type", "owner_group", "node_name", "name"},
nil,
)
c.resourcePendingTimeout = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "pending_timeout"),
"Provides access to the resource's PendingTimeout property. If a resource cannot be brought online or taken offline in the number of milliseconds specified by the PendingTimeout property, the resource is forcibly terminated.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceResourceClass = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "resource_class"),
"Gets or sets the resource class of a resource. 0: Unknown; 1: Storage; 2: Network; 32768: Unknown ",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceRestartAction = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "restart_action"),
"Provides access to the resource's RestartAction property, which is the action to be taken by the Cluster Service if the resource fails.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceRestartDelay = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "restart_delay"),
"Indicates the time delay before a failed resource is restarted.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceRestartPeriod = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "restart_period"),
"Provides access to the resource's RestartPeriod property, which is interval of time, in milliseconds, during which a specified number of restart attempts can be made on a nonresponsive resource.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceRestartThreshold = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "restart_threshold"),
"Provides access to the resource's RestartThreshold property which is the maximum number of restart attempts that can be made on a resource within an interval defined by the RestartPeriod property before the Cluster Service initiates the action specified by the RestartAction property.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceRetryPeriodOnFailure = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "retry_period_on_failure"),
"Provides access to the resource's RetryPeriodOnFailure property, which is the interval of time (in milliseconds) that a resource should remain in a failed state before the Cluster service attempts to restart it.",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "state"),
"The current state of the resource. -1: Unknown; 0: Inherited; 1: Initializing; 2: Online; 3: Offline; 4: Failed; 128: Pending; 129: Online Pending; 130: Offline Pending ",
[]string{"type", "owner_group", "name"},
nil,
)
c.resourceSubClass = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResource, "subclass"),
"Provides the list of references to nodes that can be the owner of this resource.",
[]string{"type", "owner_group", "name"},
nil,
)
}
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectResource(ch chan<- prometheus.Metric, nodeNames []string) error {
var dst []MSCluster_Resource var dst []msClusterResource
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil { if err := c.wmiClient.Query("SELECT * FROM MSCluster_Resource", &dst, nil, "root/MSCluster"); err != nil {
return err return err
} }
for _, v := range dst { for _, v := range dst {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.characteristics, c.resourceCharacteristics,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.Characteristics), float64(v.Characteristics),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.deadlockTimeout, c.resourceDeadlockTimeout,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.DeadlockTimeout), float64(v.DeadlockTimeout),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.embeddedFailureAction, c.resourceEmbeddedFailureAction,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.EmbeddedFailureAction), float64(v.EmbeddedFailureAction),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.flags, c.resourceFlags,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.Flags), float64(v.Flags),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.isAlivePollInterval, c.resourceIsAlivePollInterval,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.IsAlivePollInterval), float64(v.IsAlivePollInterval),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.looksAlivePollInterval, c.resourceLooksAlivePollInterval,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.LooksAlivePollInterval), float64(v.LooksAlivePollInterval),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.monitorProcessId, c.resourceMonitorProcessId,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.MonitorProcessId), float64(v.MonitorProcessId),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
if mscluster_node.NodeName != nil { for _, nodeName := range nodeNames {
for _, node_name := range mscluster_node.NodeName { isCurrentState := 0.0
isCurrentState := 0.0 if v.OwnerNode == nodeName {
if v.OwnerNode == node_name { isCurrentState = 1.0
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.ownerNode,
prometheus.GaugeValue,
isCurrentState,
v.Type, v.OwnerGroup, node_name, v.Name,
)
} }
ch <- prometheus.MustNewConstMetric(
c.resourceOwnerNode,
prometheus.GaugeValue,
isCurrentState,
v.Type, v.OwnerGroup, nodeName, v.Name,
)
} }
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.pendingTimeout, c.resourcePendingTimeout,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.PendingTimeout), float64(v.PendingTimeout),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.resourceClass, c.resourceResourceClass,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.ResourceClass), float64(v.ResourceClass),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.restartAction, c.resourceRestartAction,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.RestartAction), float64(v.RestartAction),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.restartDelay, c.resourceRestartDelay,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.RestartDelay), float64(v.RestartDelay),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.restartPeriod, c.resourceRestartPeriod,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.RestartPeriod), float64(v.RestartPeriod),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.restartThreshold, c.resourceRestartThreshold,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.RestartThreshold), float64(v.RestartThreshold),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.retryPeriodOnFailure, c.resourceRetryPeriodOnFailure,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.RetryPeriodOnFailure), float64(v.RetryPeriodOnFailure),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.state, c.resourceState,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.State), float64(v.State),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.subclass, c.resourceSubClass,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v.Subclass), float64(v.Subclass),
v.Type, v.OwnerGroup, v.Name, v.Type, v.OwnerGroup, v.Name,

View File

@@ -0,0 +1,240 @@
package mscluster
import (
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus"
)
const nameResourceGroup = Name + "_resourcegroup"
// msClusterResourceGroup represents the MSCluster_ResourceGroup WMI class
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resourcegroup
type msClusterResourceGroup struct {
Name string
AutoFailbackType uint
Characteristics uint
ColdStartSetting uint
DefaultOwner uint
FailbackWindowEnd int
FailbackWindowStart int
FailoverPeriod uint
FailoverThreshold uint
Flags uint
GroupType uint
OwnerNode string
Priority uint
ResiliencyPeriod uint
State uint
}
func (c *Collector) buildResourceGroup() {
c.resourceGroupAutoFailbackType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "auto_failback_type"),
"Provides access to the group's AutoFailbackType property.",
[]string{"name"},
nil,
)
c.resourceGroupCharacteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "characteristics"),
"Provides the characteristics of the group.",
[]string{"name"},
nil,
)
c.resourceGroupColdStartSetting = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "cold_start_setting"),
"Indicates whether a group can start after a cluster cold start.",
[]string{"name"},
nil,
)
c.resourceGroupDefaultOwner = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "default_owner"),
"Number of the last node the resource group was activated on or explicitly moved to.",
[]string{"name"},
nil,
)
c.resourceGroupFailbackWindowEnd = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "failback_window_end"),
"The FailbackWindowEnd property provides the latest time that the group can be moved back to the node identified as its preferred node.",
[]string{"name"},
nil,
)
c.resourceGroupFailbackWindowStart = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "failback_window_start"),
"The FailbackWindowStart property provides the earliest time (that is, local time as kept by the cluster) that the group can be moved back to the node identified as its preferred node.",
[]string{"name"},
nil,
)
c.resourceGroupFailOverPeriod = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "failover_period"),
"The FailoverPeriod property specifies a number of hours during which a maximum number of failover attempts, specified by the FailoverThreshold property, can occur.",
[]string{"name"},
nil,
)
c.resourceGroupFailOverThreshold = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "failover_threshold"),
"The FailoverThreshold property specifies the maximum number of failover attempts.",
[]string{"name"},
nil,
)
c.resourceGroupFlags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "flags"),
"Provides access to the flags set for the group. ",
[]string{"name"},
nil,
)
c.resourceGroupGroupType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "group_type"),
"The Type of the resource group.",
[]string{"name"},
nil,
)
c.resourceGroupOwnerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "owner_node"),
"The node hosting the resource group. 0: Not hosted; 1: Hosted",
[]string{"node_name", "name"},
nil,
)
c.resourceGroupOwnerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "owner_node"),
"The node hosting the resource group. 0: Not hosted; 1: Hosted",
[]string{"node_name", "name"},
nil,
)
c.resourceGroupPriority = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "priority"),
"Priority value of the resource group",
[]string{"name"},
nil,
)
c.resourceGroupResiliencyPeriod = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "resiliency_period"),
"The resiliency period for this group, in seconds.",
[]string{"name"},
nil,
)
c.resourceGroupState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "state"),
"The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending",
[]string{"name"},
nil,
)
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) collectResourceGroup(ch chan<- prometheus.Metric, nodeNames []string) error {
var dst []msClusterResourceGroup
if err := c.wmiClient.Query("SELECT * FROM MSCluster_ResourceGroup", &dst, nil, "root/MSCluster"); err != nil {
return err
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.resourceGroupAutoFailbackType,
prometheus.GaugeValue,
float64(v.AutoFailbackType),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupCharacteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupColdStartSetting,
prometheus.GaugeValue,
float64(v.ColdStartSetting),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupDefaultOwner,
prometheus.GaugeValue,
float64(v.DefaultOwner),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupFailbackWindowEnd,
prometheus.GaugeValue,
float64(v.FailbackWindowEnd),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupFailbackWindowStart,
prometheus.GaugeValue,
float64(v.FailbackWindowStart),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupFailOverPeriod,
prometheus.GaugeValue,
float64(v.FailoverPeriod),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupFailOverThreshold,
prometheus.GaugeValue,
float64(v.FailoverThreshold),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupFlags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupGroupType,
prometheus.GaugeValue,
float64(v.GroupType),
v.Name,
)
for _, nodeName := range nodeNames {
isCurrentState := 0.0
if v.OwnerNode == nodeName {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.resourceGroupOwnerNode,
prometheus.GaugeValue,
isCurrentState,
nodeName, v.Name,
)
}
ch <- prometheus.MustNewConstMetric(
c.resourceGroupPriority,
prometheus.GaugeValue,
float64(v.Priority),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupResiliencyPeriod,
prometheus.GaugeValue,
float64(v.ResiliencyPeriod),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resourceGroupState,
prometheus.GaugeValue,
float64(v.State),
v.Name,
)
}
return nil
}
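Note that `collectResourceGroup` now takes the cluster node names as a parameter instead of reading the package-level `mscluster_node.NodeName` slice the old collectors used (see the deleted files below). A hypothetical sketch of how the merged collector could gather those names once per scrape; `msClusterNode` and `collectNodeNames` are illustrative names, not part of this diff:
```
// Hypothetical helper: query MSCluster_Node once and pass the resulting names
// to the per-class collect functions such as collectResourceGroup above.
type msClusterNode struct {
	Name string
}

func (c *Collector) collectNodeNames() ([]string, error) {
	var nodes []msClusterNode
	if err := c.wmiClient.Query("SELECT Name FROM MSCluster_Node", &nodes, nil, "root/MSCluster"); err != nil {
		return nil, err
	}

	names := make([]string, 0, len(nodes))
	for _, node := range nodes {
		names = append(names, node.Name)
	}

	return names, nil
}
```
With the node list injected this way, the owner_node metrics still emit one series per cluster node (value 1 for the current owner, 0 otherwise), but without a cross-package dependency on the node collector having run first.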

View File

@@ -1,156 +0,0 @@
package mscluster_network
import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
const Name = "mscluster_network"
type Config struct{}
var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI MSCluster_Network metrics.
type Collector struct {
config Config
logger log.Logger
characteristics *prometheus.Desc
flags *prometheus.Desc
metric *prometheus.Desc
role *prometheus.Desc
state *prometheus.Desc
}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *Collector) GetName() string {
return Name
}
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil
}
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.characteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "characteristics"),
"Provides the characteristics of the network.",
[]string{"name"},
nil,
)
c.flags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "flags"),
"Provides access to the flags set for the node. ",
[]string{"name"},
nil,
)
c.metric = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "metric"),
"The metric of a cluster network (networks with lower values are used first). If this value is set, then the AutoMetric property is set to false.",
[]string{"name"},
nil,
)
c.role = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "role"),
"Provides access to the network's Role property. The Role property describes the role of the network in the cluster. 0: None; 1: Cluster; 2: Client; 3: Both ",
[]string{"name"},
nil,
)
c.state = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"Provides the current state of the network. 1-1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up",
[]string{"name"},
nil,
)
return nil
}
// MSCluster_Network docs:
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-network
type MSCluster_Network struct {
Name string
Characteristics uint
Flags uint
Metric uint
Role uint
State uint
}
// Collect sends the metric values for each metric
// to the provided prometheus metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_Network
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.metric,
prometheus.GaugeValue,
float64(v.Metric),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.role,
prometheus.GaugeValue,
float64(v.Role),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.state,
prometheus.GaugeValue,
float64(v.State),
v.Name,
)
}
return nil
}
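The standalone mscluster_network collector above is removed as part of the same merge; its metrics presumably reappear under the unified mscluster collector with a `_network` sub-prefix, mirroring resourcegroup.go earlier in this diff. A hedged sketch of what the replacement descriptor construction likely looks like (the actual network.go is not part of this excerpt):
```
// Assumed naming in the merged package, mirroring nameResourceGroup above.
const nameNetwork = Name + "_network"

func (c *Collector) buildNetwork() {
	// c.networkState is an assumed field; the real merged collector may name
	// its descriptors differently.
	c.networkState = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, nameNetwork, "state"),
		"Provides the current state of the network. -1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up",
		[]string{"name"},
		nil,
	)
}
```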

View File

@@ -1,306 +0,0 @@
package mscluster_resourcegroup
import (
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster_node"
"github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus"
)
const Name = "mscluster_resourcegroup"
type Config struct{}
var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI MSCluster_ResourceGroup metrics.
type Collector struct {
config Config
logger log.Logger
autoFailbackType *prometheus.Desc
characteristics *prometheus.Desc
coldStartSetting *prometheus.Desc
defaultOwner *prometheus.Desc
failbackWindowEnd *prometheus.Desc
failbackWindowStart *prometheus.Desc
failOverPeriod *prometheus.Desc
failOverThreshold *prometheus.Desc
flags *prometheus.Desc
groupType *prometheus.Desc
ownerNode *prometheus.Desc
priority *prometheus.Desc
resiliencyPeriod *prometheus.Desc
state *prometheus.Desc
}
func New(logger log.Logger, config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
c.SetLogger(logger)
return c
}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *Collector) GetName() string {
return Name
}
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Memory"}, nil
}
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build() error {
c.autoFailbackType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "auto_failback_type"),
"Provides access to the group's AutoFailbackType property.",
[]string{"name"},
nil,
)
c.characteristics = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "characteristics"),
"Provides the characteristics of the group.",
[]string{"name"},
nil,
)
c.coldStartSetting = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cold_start_setting"),
"Indicates whether a group can start after a cluster cold start.",
[]string{"name"},
nil,
)
c.defaultOwner = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "default_owner"),
"Number of the last node the resource group was activated on or explicitly moved to.",
[]string{"name"},
nil,
)
c.failbackWindowEnd = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failback_window_end"),
"The FailbackWindowEnd property provides the latest time that the group can be moved back to the node identified as its preferred node.",
[]string{"name"},
nil,
)
c.failbackWindowStart = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failback_window_start"),
"The FailbackWindowStart property provides the earliest time (that is, local time as kept by the cluster) that the group can be moved back to the node identified as its preferred node.",
[]string{"name"},
nil,
)
c.failOverPeriod = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_period"),
"The FailoverPeriod property specifies a number of hours during which a maximum number of failover attempts, specified by the FailoverThreshold property, can occur.",
[]string{"name"},
nil,
)
c.failOverThreshold = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "failover_threshold"),
"The FailoverThreshold property specifies the maximum number of failover attempts.",
[]string{"name"},
nil,
)
c.flags = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "flags"),
"Provides access to the flags set for the group. ",
[]string{"name"},
nil,
)
c.groupType = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "group_type"),
"The Type of the resource group.",
[]string{"name"},
nil,
)
c.ownerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "owner_node"),
"The node hosting the resource group. 0: Not hosted; 1: Hosted",
[]string{"node_name", "name"},
nil,
)
c.ownerNode = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "owner_node"),
"The node hosting the resource group. 0: Not hosted; 1: Hosted",
[]string{"node_name", "name"},
nil,
)
c.priority = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "priority"),
"Priority value of the resource group",
[]string{"name"},
nil,
)
c.resiliencyPeriod = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "resiliency_period"),
"The resiliency period for this group, in seconds.",
[]string{"name"},
nil,
)
c.state = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "state"),
"The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending",
[]string{"name"},
nil,
)
return nil
}
// MSCluster_ResourceGroup docs:
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resourcegroup
type MSCluster_ResourceGroup struct {
Name string
AutoFailbackType uint
Characteristics uint
ColdStartSetting uint
DefaultOwner uint
FailbackWindowEnd int
FailbackWindowStart int
FailoverPeriod uint
FailoverThreshold uint
Flags uint
GroupType uint
OwnerNode string
Priority uint
ResiliencyPeriod uint
State uint
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error {
var dst []MSCluster_ResourceGroup
q := wmi.QueryAll(&dst, c.logger)
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
return err
}
for _, v := range dst {
ch <- prometheus.MustNewConstMetric(
c.autoFailbackType,
prometheus.GaugeValue,
float64(v.AutoFailbackType),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.characteristics,
prometheus.GaugeValue,
float64(v.Characteristics),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.coldStartSetting,
prometheus.GaugeValue,
float64(v.ColdStartSetting),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.defaultOwner,
prometheus.GaugeValue,
float64(v.DefaultOwner),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.failbackWindowEnd,
prometheus.GaugeValue,
float64(v.FailbackWindowEnd),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.failbackWindowStart,
prometheus.GaugeValue,
float64(v.FailbackWindowStart),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.failOverPeriod,
prometheus.GaugeValue,
float64(v.FailoverPeriod),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.failOverThreshold,
prometheus.GaugeValue,
float64(v.FailoverThreshold),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.flags,
prometheus.GaugeValue,
float64(v.Flags),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.groupType,
prometheus.GaugeValue,
float64(v.GroupType),
v.Name,
)
if mscluster_node.NodeName != nil {
for _, node_name := range mscluster_node.NodeName {
isCurrentState := 0.0
if v.OwnerNode == node_name {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.ownerNode,
prometheus.GaugeValue,
isCurrentState,
node_name, v.Name,
)
}
}
ch <- prometheus.MustNewConstMetric(
c.priority,
prometheus.GaugeValue,
float64(v.Priority),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.resiliencyPeriod,
prometheus.GaugeValue,
float64(v.ResiliencyPeriod),
v.Name,
)
ch <- prometheus.MustNewConstMetric(
c.state,
prometheus.GaugeValue,
float64(v.State),
v.Name,
)
}
return nil
}

View File

@@ -3,6 +3,7 @@
package msmq package msmq
import ( import (
"errors"
"strings" "strings"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
@@ -10,8 +11,8 @@ import (
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "msmq" const Name = "msmq"
@@ -26,8 +27,8 @@ var ConfigDefaults = Config{
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
bytesInJournalQueue *prometheus.Desc bytesInJournalQueue *prometheus.Desc
bytesInQueue *prometheus.Desc bytesInQueue *prometheus.Desc
@@ -35,7 +36,7 @@ type Collector struct {
messagesInQueue *prometheus.Desc messagesInQueue *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -48,8 +49,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -69,11 +68,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -81,9 +76,17 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, wmiClient *wmi.Client) error {
logger = log.With(logger, "collector", Name)
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
if *c.config.QueryWhereClause == "" { if *c.config.QueryWhereClause == "" {
_ = level.Warn(c.logger).Log("msg", "No where-clause specified for msmq collector. This will generate a very large number of metrics!") _ = level.Warn(logger).Log("msg", "No where-clause specified for msmq collector. This will generate a very large number of metrics!")
} }
c.bytesInJournalQueue = prometheus.NewDesc( c.bytesInJournalQueue = prometheus.NewDesc(
@@ -115,9 +118,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting msmq metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting msmq metrics", "err", err)
return err return err
} }
return nil return nil
@@ -135,8 +139,12 @@ type msmqQueue struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []msmqQueue var dst []msmqQueue
q := wmi.QueryAllForClassWhere(&dst, "Win32_PerfRawData_MSMQ_MSMQQueue", *c.config.QueryWhereClause, c.logger) query := "SELECT * FROM Win32_PerfRawData_MSMQ_MSMQQueue"
if err := wmi.Query(q, &dst); err != nil { if *c.config.QueryWhereClause != "" {
query += " WHERE " + *c.config.QueryWhereClause
}
if err := c.wmiClient.Query(query, &dst); err != nil {
return err return err
} }
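With the pkg/wmi helper gone, the msmq collector assembles its WQL statement directly, appending the configured where-clause only when one is set. A small self-contained sketch of that composition; the sample where-clause value is hypothetical:
```
package main

import "fmt"

func main() {
	// Hypothetical where-clause; in the exporter it comes from the msmq
	// collector's QueryWhereClause setting.
	whereClause := "Name LIKE 'private$%'"

	query := "SELECT * FROM Win32_PerfRawData_MSMQ_MSMQQueue"
	if whereClause != "" {
		query += " WHERE " + whereClause
	}

	fmt.Println(query)
	// Output: SELECT * FROM Win32_PerfRawData_MSMQ_MSMQQueue WHERE Name LIKE 'private$%'
}
```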

View File

@@ -17,6 +17,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows/registry" "golang.org/x/sys/windows/registry"
) )
@@ -142,7 +143,6 @@ func mssqlGetPerfObjectName(sqlInstance string, collector string) string {
// A Collector is a Prometheus Collector for various WMI Win32_PerfRawData_MSSQLSERVER_* metrics. // A Collector is a Prometheus Collector for various WMI Win32_PerfRawData_MSSQLSERVER_* metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
// meta // meta
mssqlScrapeDurationDesc *prometheus.Desc mssqlScrapeDurationDesc *prometheus.Desc
@@ -414,7 +414,7 @@ type Collector struct {
mssqlChildCollectorFailure int mssqlChildCollectorFailure int
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -427,8 +427,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -480,12 +478,8 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(logger log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name) c.mssqlInstances = getMSSQLInstances(logger)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
c.mssqlInstances = getMSSQLInstances(c.logger)
perfCounters := make([]string, 0, len(c.mssqlInstances)*len(c.config.CollectorsEnabled)) perfCounters := make([]string, 0, len(c.mssqlInstances)*len(c.config.CollectorsEnabled))
for instance := range c.mssqlInstances { for instance := range c.mssqlInstances {
@@ -501,7 +495,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
// Result must order, to prevent test failures. // Result must order, to prevent test failures.
sort.Strings(c.config.CollectorsEnabled) sort.Strings(c.config.CollectorsEnabled)
@@ -1977,24 +1971,24 @@ func (c *Collector) Build() error {
return nil return nil
} }
type mssqlCollectorFunc func(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error type mssqlCollectorFunc func(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error
func (c *Collector) execute(ctx *types.ScrapeContext, name string, fn mssqlCollectorFunc, ch chan<- prometheus.Metric, sqlInstance string, wg *sync.WaitGroup) { func (c *Collector) execute(ctx *types.ScrapeContext, logger log.Logger, name string, fn mssqlCollectorFunc, ch chan<- prometheus.Metric, sqlInstance string, wg *sync.WaitGroup) {
// Reset failure counter on each scrape // Reset failure counter on each scrape
c.mssqlChildCollectorFailure = 0 c.mssqlChildCollectorFailure = 0
defer wg.Done() defer wg.Done()
begin := time.Now() begin := time.Now()
err := fn(ctx, ch, sqlInstance) err := fn(ctx, logger, ch, sqlInstance)
duration := time.Since(begin) duration := time.Since(begin)
var success float64 var success float64
if err != nil { if err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("mssql class collector %s failed after %fs", name, duration.Seconds()), "err", err) _ = level.Error(logger).Log("msg", fmt.Sprintf("mssql class collector %s failed after %fs", name, duration.Seconds()), "err", err)
success = 0 success = 0
c.mssqlChildCollectorFailure++ c.mssqlChildCollectorFailure++
} else { } else {
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql class collector %s succeeded after %fs.", name, duration.Seconds())) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql class collector %s succeeded after %fs.", name, duration.Seconds()))
success = 1 success = 1
} }
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
@@ -2013,7 +2007,8 @@ func (c *Collector) execute(ctx *types.ScrapeContext, name string, fn mssqlColle
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
for sqlInstance := range c.mssqlInstances { for sqlInstance := range c.mssqlInstances {
@@ -2021,7 +2016,7 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
function := c.mssqlCollectors[name] function := c.mssqlCollectors[name]
wg.Add(1) wg.Add(1)
go c.execute(ctx, name, function, ch, sqlInstance, &wg) go c.execute(ctx, logger, name, function, ch, sqlInstance, &wg)
} }
} }
@@ -2084,11 +2079,11 @@ type mssqlAccessMethods struct {
WorktablesFromCacheRatioBase float64 `perflib:"Worktables From Cache Base_Base"` WorktablesFromCacheRatioBase float64 `perflib:"Worktables From Cache Base_Base"`
} }
func (c *Collector) collectAccessMethods(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectAccessMethods(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlAccessMethods var dst []mssqlAccessMethods
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_accessmethods collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_accessmethods collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "accessmethods")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "accessmethods")], &dst, logger); err != nil {
return err return err
} }
@@ -2419,11 +2414,11 @@ type mssqlAvailabilityReplica struct {
SendstoTransportPerSec float64 `perflib:"Sends to Transport/sec"` SendstoTransportPerSec float64 `perflib:"Sends to Transport/sec"`
} }
func (c *Collector) collectAvailabilityReplica(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectAvailabilityReplica(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlAvailabilityReplica var dst []mssqlAvailabilityReplica
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_availreplica collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_availreplica collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "availreplica")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "availreplica")], &dst, logger); err != nil {
return err return err
} }
@@ -2527,11 +2522,11 @@ type mssqlBufferManager struct {
TargetPages float64 `perflib:"Target pages"` TargetPages float64 `perflib:"Target pages"`
} }
func (c *Collector) collectBufferManager(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectBufferManager(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlBufferManager var dst []mssqlBufferManager
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_bufman collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_bufman collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "bufman")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "bufman")], &dst, logger); err != nil {
return err return err
} }
@@ -2731,11 +2726,11 @@ type mssqlDatabaseReplica struct {
TransactionDelay float64 `perflib:"Transaction Delay"` TransactionDelay float64 `perflib:"Transaction Delay"`
} }
func (c *Collector) collectDatabaseReplica(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectDatabaseReplica(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlDatabaseReplica var dst []mssqlDatabaseReplica
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_dbreplica collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_dbreplica collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "dbreplica")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "dbreplica")], &dst, logger); err != nil {
return err return err
} }
@@ -2970,11 +2965,11 @@ type mssqlDatabases struct {
XTPMemoryUsedKB float64 `perflib:"XTP Memory Used (KB)"` XTPMemoryUsedKB float64 `perflib:"XTP Memory Used (KB)"`
} }
func (c *Collector) collectDatabases(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectDatabases(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlDatabases var dst []mssqlDatabases
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_databases collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_databases collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "databases")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "databases")], &dst, logger); err != nil {
return err return err
} }
@@ -3352,11 +3347,11 @@ type mssqlGeneralStatistics struct {
UserConnections float64 `perflib:"User Connections"` UserConnections float64 `perflib:"User Connections"`
} }
func (c *Collector) collectGeneralStatistics(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectGeneralStatistics(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlGeneralStatistics var dst []mssqlGeneralStatistics
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_genstats collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_genstats collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "genstats")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "genstats")], &dst, logger); err != nil {
return err return err
} }
@@ -3547,11 +3542,11 @@ type mssqlLocks struct {
NumberOfDeadlocksPerSec float64 `perflib:"Number of Deadlocks/sec"` NumberOfDeadlocksPerSec float64 `perflib:"Number of Deadlocks/sec"`
} }
func (c *Collector) collectLocks(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectLocks(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlLocks var dst []mssqlLocks
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_locks collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_locks collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "locks")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "locks")], &dst, logger); err != nil {
return err return err
} }
@@ -3645,11 +3640,11 @@ type mssqlMemoryManager struct {
TotalServerMemoryKB float64 `perflib:"Total Server Memory (KB)"` TotalServerMemoryKB float64 `perflib:"Total Server Memory (KB)"`
} }
func (c *Collector) collectMemoryManager(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectMemoryManager(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlMemoryManager var dst []mssqlMemoryManager
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_memmgr collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_memmgr collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "memmgr")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "memmgr")], &dst, logger); err != nil {
return err return err
} }
@@ -3814,11 +3809,11 @@ type mssqlSQLStatistics struct {
UnsafeAutoParamsPerSec float64 `perflib:"Unsafe Auto-Params/sec"` UnsafeAutoParamsPerSec float64 `perflib:"Unsafe Auto-Params/sec"`
} }
func (c *Collector) collectSQLStats(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectSQLStats(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlSQLStatistics var dst []mssqlSQLStatistics
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlstats collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_sqlstats collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlstats")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlstats")], &dst, logger); err != nil {
return err return err
} }
@@ -3922,11 +3917,11 @@ type mssqlWaitStatistics struct {
WaitStatsTransactionOwnershipWaits float64 `perflib:"Transaction ownership waits"` WaitStatsTransactionOwnershipWaits float64 `perflib:"Transaction ownership waits"`
} }
func (c *Collector) collectWaitStats(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectWaitStats(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlWaitStatistics var dst []mssqlWaitStatistics
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_waitstats collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_waitstats collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "waitstats")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "waitstats")], &dst, logger); err != nil {
return err return err
} }
@@ -4028,11 +4023,11 @@ type mssqlSQLErrors struct {
// Win32_PerfRawData_MSSQLSERVER_SQLServerErrors docs: // Win32_PerfRawData_MSSQLSERVER_SQLServerErrors docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object // - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object
func (c *Collector) collectSQLErrors(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectSQLErrors(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlSQLErrors var dst []mssqlSQLErrors
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlerrors")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlerrors")], &dst, logger); err != nil {
return err return err
} }
@@ -4071,11 +4066,11 @@ type mssqlTransactions struct {
// Win32_PerfRawData_MSSQLSERVER_Transactions docs: // Win32_PerfRawData_MSSQLSERVER_Transactions docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object // - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
func (c *Collector) collectTransactions(ctx *types.ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) error { func (c *Collector) collectTransactions(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric, sqlInstance string) error {
var dst []mssqlTransactions var dst []mssqlTransactions
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_transactions collector iterating sql instance %s.", sqlInstance)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("mssql_transactions collector iterating sql instance %s.", sqlInstance))
if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "transactions")], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects[mssqlGetPerfObjectName(sqlInstance, "transactions")], &dst, logger); err != nil {
return err return err
} }
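Since the mssql sub-collectors now accept the scrape-scoped logger as an argument, Collect threads it through every goroutine it starts. A condensed sketch of that fan-out; the inner loop over CollectorsEnabled and the final wg.Wait() are assumed, as they fall outside the displayed hunks:
```
// Condensed sketch of the per-instance fan-out in Collect.
wg := sync.WaitGroup{}

for sqlInstance := range c.mssqlInstances {
	for _, name := range c.config.CollectorsEnabled { // assumed inner loop
		fn := c.mssqlCollectors[name]
		wg.Add(1)
		go c.execute(ctx, logger, name, fn, ch, sqlInstance, &wg)
	}
}

wg.Wait() // assumed; not visible in the hunk above
```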

View File

@@ -12,6 +12,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "net" const Name = "net"
@@ -31,7 +32,6 @@ var nicNameToUnderscore = regexp.MustCompile("[^a-zA-Z0-9]")
// A Collector is a Prometheus Collector for Perflib Network Interface metrics. // A Collector is a Prometheus Collector for Perflib Network Interface metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
bytesReceivedTotal *prometheus.Desc bytesReceivedTotal *prometheus.Desc
bytesSentTotal *prometheus.Desc bytesSentTotal *prometheus.Desc
@@ -48,7 +48,7 @@ type Collector struct {
currentBandwidth *prometheus.Desc currentBandwidth *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -65,8 +65,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -110,11 +108,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Network Interface"}, nil return []string{"Network Interface"}, nil
} }
@@ -122,7 +116,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.bytesReceivedTotal = prometheus.NewDesc( c.bytesReceivedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_received_total"), prometheus.BuildFQName(types.Namespace, Name, "bytes_received_total"),
"(Network.BytesReceivedPerSec)", "(Network.BytesReceivedPerSec)",
@@ -207,9 +201,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting net metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting net metrics", "err", err)
return err return err
} }
return nil return nil
@@ -240,10 +235,11 @@ type networkInterface struct {
CurrentBandwidth float64 `perflib:"Current Bandwidth"` CurrentBandwidth float64 `perflib:"Current Bandwidth"`
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []networkInterface var dst []networkInterface
if err := perflib.UnmarshalObject(ctx.PerfObjects["Network Interface"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["Network Interface"], &dst, logger); err != nil {
return err return err
} }

View File

@@ -3,12 +3,14 @@
package netframework_clrexceptions package netframework_clrexceptions
import ( import (
"errors"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "netframework_clrexceptions" const Name = "netframework_clrexceptions"
@@ -19,8 +21,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
numberOfExceptionsThrown *prometheus.Desc numberOfExceptionsThrown *prometheus.Desc
numberOfFilters *prometheus.Desc numberOfFilters *prometheus.Desc
@@ -28,7 +30,7 @@ type Collector struct {
throwToCatchDepth *prometheus.Desc throwToCatchDepth *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -37,8 +39,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -50,11 +50,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -62,7 +58,12 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.numberOfExceptionsThrown = prometheus.NewDesc( c.numberOfExceptionsThrown = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "exceptions_thrown_total"), prometheus.BuildFQName(types.Namespace, Name, "exceptions_thrown_total"),
"Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.", "Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.",
@@ -92,9 +93,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrexceptions metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrexceptions metrics", "err", err)
return err return err
} }
return nil return nil
@@ -112,8 +114,7 @@ type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRExceptions", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
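Collectors that query WMI now receive a shared *wmi.Client through Build and refuse to start when it (or its SWbemServicesClient) is nil, instead of falling back to the package-level default client. A hedged, self-contained sketch of how such a client might be constructed once and reused; the exporter's actual wiring is outside this excerpt:
```
package main

import (
	"log"

	"github.com/yusufpapurcu/wmi"
)

func main() {
	// One client with a persistent SWbemServices session, shared by all
	// collectors instead of the implicit default client.
	client := &wmi.Client{AllowMissingFields: true}

	swbem, err := wmi.InitializeSWbemServices(client)
	if err != nil {
		log.Fatal(err)
	}
	client.SWbemServicesClient = swbem

	// Same query shape as the collect() above, selecting only instance names.
	var dst []struct{ Name string }
	if err := client.Query("SELECT Name FROM Win32_PerfRawData_NETFramework_NETCLRExceptions", &dst); err != nil {
		log.Fatal(err)
	}

	log.Printf("found %d CLR exception instances", len(dst))
}
```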

View File

@@ -3,12 +3,14 @@
package netframework_clrinterop package netframework_clrinterop
import ( import (
"errors"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "netframework_clrinterop" const Name = "netframework_clrinterop"
@@ -19,15 +21,15 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
numberOfCCWs *prometheus.Desc numberOfCCWs *prometheus.Desc
numberOfMarshalling *prometheus.Desc numberOfMarshalling *prometheus.Desc
numberOfStubs *prometheus.Desc numberOfStubs *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -36,8 +38,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -49,11 +49,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -61,7 +57,12 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.numberOfCCWs = prometheus.NewDesc( c.numberOfCCWs = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "com_callable_wrappers_total"), prometheus.BuildFQName(types.Namespace, Name, "com_callable_wrappers_total"),
"Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.", "Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.",
@@ -85,9 +86,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrinterop metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrinterop metrics", "err", err)
return err return err
} }
return nil return nil
@@ -105,8 +107,7 @@ type Win32_PerfRawData_NETFramework_NETCLRInterop struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRInterop", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }

View File

@@ -7,8 +7,8 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "netframework_clrjit" const Name = "netframework_clrjit"
@@ -19,8 +19,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
numberOfMethodsJitted *prometheus.Desc numberOfMethodsJitted *prometheus.Desc
timeInJit *prometheus.Desc timeInJit *prometheus.Desc
@@ -28,7 +28,7 @@ type Collector struct {
totalNumberOfILBytesJitted *prometheus.Desc totalNumberOfILBytesJitted *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -37,8 +37,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -50,11 +48,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -62,7 +56,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.numberOfMethodsJitted = prometheus.NewDesc( c.numberOfMethodsJitted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "jit_methods_total"), prometheus.BuildFQName(types.Namespace, Name, "jit_methods_total"),
"Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.", "Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.",
@@ -92,9 +86,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrjit metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrjit metrics", "err", err)
return err return err
} }
return nil return nil
@@ -114,8 +109,7 @@ type Win32_PerfRawData_NETFramework_NETCLRJit struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRJit var dst []Win32_PerfRawData_NETFramework_NETCLRJit
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRJit", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
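This query change repeats in every WMI-backed collector below: the WQL string that wmi.QueryAll generated from the destination struct (and the package-level wmi.Query call) gives way to an explicit SELECT run through the injected client. A minimal sketch of the resulting collect method; the class name, struct, and descriptor are placeholders rather than code from any one collector:

```
// win32PerfRawDataExample stands in for the per-collector
// Win32_PerfRawData_* struct; only the shape matters here.
type win32PerfRawDataExample struct {
	ExampleCounter uint64
}

func (c *Collector) collect(ch chan<- prometheus.Metric) error {
	var dst []win32PerfRawDataExample

	// The injected client runs the WQL directly; nothing is generated
	// from the destination slice and no logger is involved in querying.
	if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_Example", &dst); err != nil {
		return err
	}

	for _, row := range dst {
		ch <- prometheus.MustNewConstMetric(c.exampleDesc, prometheus.CounterValue, float64(row.ExampleCounter))
	}

	return nil
}
```

Dropping the logger from the query path is also what lets the collectors below remove their stored c.logger.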


@@ -3,12 +3,14 @@
package netframework_clrloading package netframework_clrloading
import ( import (
"errors"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "netframework_clrloading" const Name = "netframework_clrloading"
@@ -19,8 +21,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
bytesInLoaderHeap *prometheus.Desc bytesInLoaderHeap *prometheus.Desc
currentAppDomains *prometheus.Desc currentAppDomains *prometheus.Desc
@@ -33,7 +35,7 @@ type Collector struct {
totalNumberOfLoadFailures *prometheus.Desc totalNumberOfLoadFailures *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -42,8 +44,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -55,11 +55,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -67,7 +63,13 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.bytesInLoaderHeap = prometheus.NewDesc( c.bytesInLoaderHeap = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "loader_heap_size_bytes"), prometheus.BuildFQName(types.Namespace, Name, "loader_heap_size_bytes"),
"Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.", "Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.",
@@ -127,9 +129,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrloading metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrloading metrics", "err", err)
return err return err
} }
return nil return nil
@@ -158,8 +161,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLoading struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRLoading var dst []Win32_PerfRawData_NETFramework_NETCLRLoading
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRLoading", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
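Read as the assembled result rather than a hunk, the Build change is the same in every WMI-backed collector: Build now receives the shared client, refuses to run without a usable SWbemServices connection, and keeps a reference for later queries. A sketch using exactly the signature shown in this diff:

```
func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
	// Guard against a missing shared client before keeping a reference;
	// every WMI-backed collector in this change starts Build this way.
	if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
		return errors.New("wmiClient or SWbemServicesClient is nil")
	}

	c.wmiClient = wmiClient

	// Metric descriptors are created below this point, unchanged.
	return nil
}
```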


@@ -3,12 +3,14 @@
package netframework_clrlocksandthreads package netframework_clrlocksandthreads
import ( import (
"errors"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "netframework_clrlocksandthreads" const Name = "netframework_clrlocksandthreads"
@@ -19,8 +21,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
currentQueueLength *prometheus.Desc currentQueueLength *prometheus.Desc
numberOfCurrentLogicalThreads *prometheus.Desc numberOfCurrentLogicalThreads *prometheus.Desc
@@ -31,7 +33,7 @@ type Collector struct {
totalNumberOfContentions *prometheus.Desc totalNumberOfContentions *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -40,8 +42,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -53,11 +53,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -65,7 +61,13 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.currentQueueLength = prometheus.NewDesc( c.currentQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_queue_length"), prometheus.BuildFQName(types.Namespace, Name, "current_queue_length"),
"Displays the total number of threads that are currently waiting to acquire a managed lock in the application.", "Displays the total number of threads that are currently waiting to acquire a managed lock in the application.",
@@ -113,9 +115,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics", "err", err)
return err return err
} }
return nil return nil
@@ -138,8 +141,7 @@ type Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }


@@ -3,12 +3,14 @@
package netframework_clrmemory package netframework_clrmemory
import ( import (
"errors"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "netframework_clrmemory" const Name = "netframework_clrmemory"
@@ -19,8 +21,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
allocatedBytes *prometheus.Desc allocatedBytes *prometheus.Desc
finalizationSurvivors *prometheus.Desc finalizationSurvivors *prometheus.Desc
@@ -36,7 +38,7 @@ type Collector struct {
timeInGC *prometheus.Desc timeInGC *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -45,8 +47,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -58,11 +58,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -70,7 +66,13 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.allocatedBytes = prometheus.NewDesc( c.allocatedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "allocated_bytes_total"), prometheus.BuildFQName(types.Namespace, Name, "allocated_bytes_total"),
"Displays the total number of bytes allocated on the garbage collection heap.", "Displays the total number of bytes allocated on the garbage collection heap.",
@@ -148,9 +150,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrmemory metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrmemory metrics", "err", err)
return err return err
} }
return nil return nil
@@ -192,8 +195,7 @@ type Win32_PerfRawData_NETFramework_NETCLRMemory struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRMemory var dst []Win32_PerfRawData_NETFramework_NETCLRMemory
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRMemory", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }


@@ -3,12 +3,14 @@
package netframework_clrremoting package netframework_clrremoting
import ( import (
"errors"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "netframework_clrremoting" const Name = "netframework_clrremoting"
@@ -19,8 +21,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
channels *prometheus.Desc channels *prometheus.Desc
contextBoundClassesLoaded *prometheus.Desc contextBoundClassesLoaded *prometheus.Desc
@@ -30,7 +32,7 @@ type Collector struct {
totalRemoteCalls *prometheus.Desc totalRemoteCalls *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -39,8 +41,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -52,11 +52,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -64,7 +60,13 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.channels = prometheus.NewDesc( c.channels = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "channels_total"), prometheus.BuildFQName(types.Namespace, Name, "channels_total"),
"Displays the total number of remoting channels registered across all application domains since application started.", "Displays the total number of remoting channels registered across all application domains since application started.",
@@ -106,9 +108,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrremoting metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrremoting metrics", "err", err)
return err return err
} }
return nil return nil
@@ -128,8 +131,7 @@ type Win32_PerfRawData_NETFramework_NETCLRRemoting struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRRemoting", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }


@@ -3,12 +3,14 @@
package netframework_clrsecurity package netframework_clrsecurity
import ( import (
"errors"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "netframework_clrsecurity" const Name = "netframework_clrsecurity"
@@ -19,8 +21,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
numberLinkTimeChecks *prometheus.Desc numberLinkTimeChecks *prometheus.Desc
timeInRTChecks *prometheus.Desc timeInRTChecks *prometheus.Desc
@@ -28,7 +30,7 @@ type Collector struct {
totalRuntimeChecks *prometheus.Desc totalRuntimeChecks *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -37,8 +39,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -50,11 +50,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -62,7 +58,12 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.numberLinkTimeChecks = prometheus.NewDesc( c.numberLinkTimeChecks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "link_time_checks_total"), prometheus.BuildFQName(types.Namespace, Name, "link_time_checks_total"),
"Displays the total number of link-time code access security checks since the application started.", "Displays the total number of link-time code access security checks since the application started.",
@@ -92,9 +93,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrsecurity metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting win32_perfrawdata_netframework_netclrsecurity metrics", "err", err)
return err return err
} }
return nil return nil
@@ -113,8 +115,7 @@ type Win32_PerfRawData_NETFramework_NETCLRSecurity struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRSecurity", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }


@@ -1,14 +1,15 @@
package nps package nps
import ( import (
"errors"
"fmt" "fmt"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "nps" const Name = "nps"
@@ -19,8 +20,8 @@ var ConfigDefaults = Config{}
// Collector is a Prometheus Collector for WMI Win32_PerfRawData_IAS_NPSAuthenticationServer and Win32_PerfRawData_IAS_NPSAccountingServer metrics. // Collector is a Prometheus Collector for WMI Win32_PerfRawData_IAS_NPSAuthenticationServer and Win32_PerfRawData_IAS_NPSAccountingServer metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
accessAccepts *prometheus.Desc accessAccepts *prometheus.Desc
accessChallenges *prometheus.Desc accessChallenges *prometheus.Desc
@@ -50,7 +51,7 @@ type Collector struct {
accountingUnknownType *prometheus.Desc accountingUnknownType *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -59,8 +60,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -72,11 +71,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -84,7 +79,12 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.accessAccepts = prometheus.NewDesc( c.accessAccepts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "access_accepts"), prometheus.BuildFQName(types.Namespace, Name, "access_accepts"),
"(AccessAccepts)", "(AccessAccepts)",
@@ -241,13 +241,14 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.CollectAccept(ch); err != nil { if err := c.CollectAccept(ch); err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("failed collecting NPS accept data: %s", err)) _ = level.Error(logger).Log("msg", fmt.Sprintf("failed collecting NPS accept data: %s", err))
return err return err
} }
if err := c.CollectAccounting(ch); err != nil { if err := c.CollectAccounting(ch); err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("failed collecting NPS accounting data: %s", err)) _ = level.Error(logger).Log("msg", fmt.Sprintf("failed collecting NPS accounting data: %s", err))
return err return err
} }
return nil return nil
@@ -294,8 +295,7 @@ type Win32_PerfRawData_IAS_NPSAccountingServer struct {
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) CollectAccept(ch chan<- prometheus.Metric) error { func (c *Collector) CollectAccept(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_IAS_NPSAuthenticationServer var dst []Win32_PerfRawData_IAS_NPSAuthenticationServer
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_IAS_NPSAuthenticationServer", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -382,8 +382,7 @@ func (c *Collector) CollectAccept(ch chan<- prometheus.Metric) error {
func (c *Collector) CollectAccounting(ch chan<- prometheus.Metric) error { func (c *Collector) CollectAccounting(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_IAS_NPSAccountingServer var dst []Win32_PerfRawData_IAS_NPSAccountingServer
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_IAS_NPSAccountingServer", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
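With SetLogger and the stored c.logger removed, every Collect derives a collector-scoped logger from the one handed in per scrape. A sketch of the shape these collectors now share; the error message and the single collect call are illustrative (nps, for instance, calls CollectAccept and CollectAccounting):

```
func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
	// The request-scoped logger arrives as an argument; the collector
	// only attaches its own name instead of keeping a logger field.
	logger = log.With(logger, "collector", Name)

	if err := c.collect(ch); err != nil {
		_ = level.Error(logger).Log("msg", "failed collecting metrics", "err", err)

		return err
	}

	return nil
}
```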


@@ -21,6 +21,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows/registry" "golang.org/x/sys/windows/registry"
) )
@@ -33,7 +34,6 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI metrics. // A Collector is a Prometheus Collector for WMI metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
osInformation *prometheus.Desc osInformation *prometheus.Desc
pagingFreeBytes *prometheus.Desc pagingFreeBytes *prometheus.Desc
@@ -56,7 +56,7 @@ type pagingFileCounter struct {
UsagePeak float64 `perflib:"% Usage Peak"` UsagePeak float64 `perflib:"% Usage Peak"`
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -65,8 +65,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -78,11 +76,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Paging File"}, nil return []string{"Paging File"}, nil
} }
@@ -90,7 +84,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.osInformation = prometheus.NewDesc( c.osInformation = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"), prometheus.BuildFQName(types.Namespace, Name, "info"),
"OperatingSystem.Caption, OperatingSystem.Version", "OperatingSystem.Caption, OperatingSystem.Version",
@@ -174,9 +168,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting os metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting os metrics", "err", err)
return err return err
} }
return nil return nil
@@ -200,7 +195,8 @@ type Win32_OperatingSystem struct {
Version string Version string
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
nwgi, err := netapi32.GetWorkstationInfo() nwgi, err := netapi32.GetWorkstationInfo()
if err != nil { if err != nil {
return err return err
@@ -237,7 +233,7 @@ func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
file, err := os.Stat(fileString) file, err := os.Stat(fileString)
// For unknown reasons, Windows doesn't always create a page file. Continue collection rather than aborting. // For unknown reasons, Windows doesn't always create a page file. Continue collection rather than aborting.
if err != nil { if err != nil {
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("Failed to read page file (reason: %s): %s\n", err, fileString)) _ = level.Debug(logger).Log("msg", fmt.Sprintf("Failed to read page file (reason: %s): %s\n", err, fileString))
} else { } else {
fsipf += float64(file.Size()) fsipf += float64(file.Size())
} }
@@ -274,7 +270,7 @@ func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
} }
pfc := make([]pagingFileCounter, 0) pfc := make([]pagingFileCounter, 0)
if err := perflib.UnmarshalObject(ctx.PerfObjects["Paging File"], &pfc, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["Paging File"], &pfc, logger); err != nil {
return err return err
} }
@@ -334,7 +330,7 @@ func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
fsipf, fsipf,
) )
} else { } else {
_ = level.Debug(c.logger).Log("msg", "Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.") _ = level.Debug(logger).Log("msg", "Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
} }
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.virtualMemoryFreeBytes, c.virtualMemoryFreeBytes,


@@ -13,6 +13,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "physical_disk" const Name = "physical_disk"
@@ -30,7 +31,6 @@ var ConfigDefaults = Config{
// A Collector is a Prometheus Collector for perflib PhysicalDisk metrics. // A Collector is a Prometheus Collector for perflib PhysicalDisk metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
idleTime *prometheus.Desc idleTime *prometheus.Desc
readBytesTotal *prometheus.Desc readBytesTotal *prometheus.Desc
@@ -46,7 +46,7 @@ type Collector struct {
writesTotal *prometheus.Desc writesTotal *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -63,8 +63,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -108,11 +106,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"PhysicalDisk"}, nil return []string{"PhysicalDisk"}, nil
} }
@@ -120,7 +114,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.requestsQueued = prometheus.NewDesc( c.requestsQueued = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_queued"), prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
"The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength)", "The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength)",
@@ -210,9 +204,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting physical_disk metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting physical_disk metrics", "err", err)
return err return err
} }
return nil return nil
@@ -237,9 +232,10 @@ type PhysicalDisk struct {
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"` AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []PhysicalDisk var dst []PhysicalDisk
if err := perflib.UnmarshalObject(ctx.PerfObjects["PhysicalDisk"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["PhysicalDisk"], &dst, logger); err != nil {
return err return err
} }


@@ -3,6 +3,7 @@
package printer package printer
import ( import (
"errors"
"fmt" "fmt"
"regexp" "regexp"
"strings" "strings"
@@ -11,8 +12,8 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "printer" const Name = "printer"
@@ -39,15 +40,15 @@ var ConfigDefaults = Config{
} }
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
printerStatus *prometheus.Desc printerStatus *prometheus.Desc
printerJobStatus *prometheus.Desc printerJobStatus *prometheus.Desc
printerJobCount *prometheus.Desc printerJobCount *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -64,8 +65,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -105,15 +104,17 @@ func NewWithFlags(app *kingpin.Application) *Collector {
return c return c
} }
func (c *Collector) SetLogger(logger log.Logger) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) Close() error { func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.printerJobStatus = prometheus.NewDesc( c.printerJobStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "job_status"), prometheus.BuildFQName(types.Namespace, Name, "job_status"),
"A counter of printer jobs by status", "A counter of printer jobs by status",
@@ -138,7 +139,7 @@ func (c *Collector) Build() error {
func (c *Collector) GetName() string { return Name } func (c *Collector) GetName() string { return Name }
func (c *Collector) GetPerfCounter() ([]string, error) { return []string{"Printer"}, nil } func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) { return []string{"Printer"}, nil }
type wmiPrinter struct { type wmiPrinter struct {
Name string Name string
@@ -152,14 +153,15 @@ type wmiPrintJob struct {
Status string Status string
} }
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collectPrinterStatus(ch); err != nil { if err := c.collectPrinterStatus(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed to collect printer status metrics", "err", err) _ = level.Error(logger).Log("msg", "failed to collect printer status metrics", "err", err)
return err return err
} }
if err := c.collectPrinterJobStatus(ch); err != nil { if err := c.collectPrinterJobStatus(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed to collect printer job status metrics", "err", err) _ = level.Error(logger).Log("msg", "failed to collect printer job status metrics", "err", err)
return err return err
} }
@@ -168,9 +170,7 @@ func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
func (c *Collector) collectPrinterStatus(ch chan<- prometheus.Metric) error { func (c *Collector) collectPrinterStatus(ch chan<- prometheus.Metric) error {
var printers []wmiPrinter var printers []wmiPrinter
if err := c.wmiClient.Query("SELECT * FROM win32_Printer", &printers); err != nil {
q := wmi.QueryAllForClass(&printers, "win32_Printer", c.logger)
if err := wmi.Query(q, &printers); err != nil {
return err return err
} }
@@ -208,9 +208,7 @@ func (c *Collector) collectPrinterStatus(ch chan<- prometheus.Metric) error {
func (c *Collector) collectPrinterJobStatus(ch chan<- prometheus.Metric) error { func (c *Collector) collectPrinterJobStatus(ch chan<- prometheus.Metric) error {
var printJobs []wmiPrintJob var printJobs []wmiPrintJob
if err := c.wmiClient.Query("SELECT * FROM win32_PrintJob", &printJobs); err != nil {
q := wmi.QueryAllForClass(&printJobs, "win32_PrintJob", c.logger)
if err := wmi.Query(q, &printJobs); err != nil {
return err return err
} }
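Taken together, the constructor, Build, and Collect changes leave callers with a three-step lifecycle. A hedged sketch of wiring the printer collector under the new signatures; runPrinterCollector is a hypothetical helper (imports omitted) and not part of the exporter:

```
// runPrinterCollector only illustrates the call order implied by the
// new interface; the caller owns both the logger and the WMI client.
func runPrinterCollector(logger log.Logger, wmiClient *wmi.Client, ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error {
	c := printer.New(nil) // nil falls back to the package ConfigDefaults

	// Build receives the logger and the shared WMI client up front ...
	if err := c.Build(logger, wmiClient); err != nil {
		return err
	}

	// ... and Collect gets the request-scoped logger on every scrape.
	return c.Collect(ctx, logger, ch)
}
```

This keeps construction side-effect free: New only copies configuration, while everything environment-dependent arrives through Build and Collect.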


@@ -9,14 +9,15 @@ import (
"strconv" "strconv"
"strings" "strings"
"syscall" "syscall"
"unsafe"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
) )
@@ -26,22 +27,21 @@ type Config struct {
ProcessInclude *regexp.Regexp `yaml:"process_include"` ProcessInclude *regexp.Regexp `yaml:"process_include"`
ProcessExclude *regexp.Regexp `yaml:"process_exclude"` ProcessExclude *regexp.Regexp `yaml:"process_exclude"`
EnableWorkerProcess bool `yaml:"enable_iis_worker_process"` //nolint:tagliatelle EnableWorkerProcess bool `yaml:"enable_iis_worker_process"` //nolint:tagliatelle
EnableReportOwner bool `yaml:"enable_report_owner"`
} }
var ConfigDefaults = Config{ var ConfigDefaults = Config{
ProcessInclude: types.RegExpAny, ProcessInclude: types.RegExpAny,
ProcessExclude: types.RegExpEmpty, ProcessExclude: types.RegExpEmpty,
EnableWorkerProcess: false, EnableWorkerProcess: false,
EnableReportOwner: false,
} }
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
lookupCache map[string]string lookupCache map[string]string
info *prometheus.Desc
cpuTimeTotal *prometheus.Desc cpuTimeTotal *prometheus.Desc
handleCount *prometheus.Desc handleCount *prometheus.Desc
ioBytesTotal *prometheus.Desc ioBytesTotal *prometheus.Desc
@@ -59,7 +59,7 @@ type Collector struct {
workingSetPrivate *prometheus.Desc workingSetPrivate *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -76,8 +76,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -103,11 +101,6 @@ func NewWithFlags(app *kingpin.Application) *Collector {
"Enable IIS worker process name queries. May cause the collector to leak memory.", "Enable IIS worker process name queries. May cause the collector to leak memory.",
).Default(strconv.FormatBool(c.config.EnableWorkerProcess)).BoolVar(&c.config.EnableWorkerProcess) ).Default(strconv.FormatBool(c.config.EnableWorkerProcess)).BoolVar(&c.config.EnableWorkerProcess)
app.Flag(
"collector.process.report-owner",
"Enable reporting of process owner.",
).Default(strconv.FormatBool(c.config.EnableReportOwner)).BoolVar(&c.config.EnableReportOwner)
app.Action(func(*kingpin.ParseContext) error { app.Action(func(*kingpin.ParseContext) error {
var err error var err error
@@ -131,11 +124,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Process"}, nil return []string{"Process"}, nil
} }
@@ -143,104 +132,114 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, wmiClient *wmi.Client) error {
if c.config.ProcessInclude.String() == "^(?:.*)$" && c.config.ProcessExclude.String() == "^(?:)$" { logger = log.With(logger, "collector", Name)
_ = level.Warn(c.logger).Log("msg", "No filters specified for process collector. This will generate a very large number of metrics!")
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
} }
commonLabels := make([]string, 0) c.wmiClient = wmiClient
if c.config.EnableReportOwner {
commonLabels = []string{"owner"} if c.config.ProcessInclude.String() == "^(?:.*)$" && c.config.ProcessExclude.String() == "^(?:)$" {
_ = level.Warn(logger).Log("msg", "No filters specified for process collector. This will generate a very large number of metrics!")
} }
c.info = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"Process information.",
[]string{"process", "process_id", "creating_process_id", "process_group_id", "owner", "cmdline"},
nil,
)
c.startTime = prometheus.NewDesc( c.startTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "start_time"), prometheus.BuildFQName(types.Namespace, Name, "start_time"),
"Time of process start.", "Time of process start.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.cpuTimeTotal = prometheus.NewDesc( c.cpuTimeTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cpu_time_total"), prometheus.BuildFQName(types.Namespace, Name, "cpu_time_total"),
"Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user).", "Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user).",
append(commonLabels, "process", "process_id", "creating_process_id", "mode"), []string{"process", "process_id", "mode"},
nil, nil,
) )
c.handleCount = prometheus.NewDesc( c.handleCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "handles"), prometheus.BuildFQName(types.Namespace, Name, "handles"),
"Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.", "Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.ioBytesTotal = prometheus.NewDesc( c.ioBytesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "io_bytes_total"), prometheus.BuildFQName(types.Namespace, Name, "io_bytes_total"),
"Bytes issued to I/O operations in different modes (read, write, other).", "Bytes issued to I/O operations in different modes (read, write, other).",
append(commonLabels, "process", "process_id", "creating_process_id", "mode"), []string{"process", "process_id", "mode"},
nil, nil,
) )
c.ioOperationsTotal = prometheus.NewDesc( c.ioOperationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "io_operations_total"), prometheus.BuildFQName(types.Namespace, Name, "io_operations_total"),
"I/O operations issued in different modes (read, write, other).", "I/O operations issued in different modes (read, write, other).",
append(commonLabels, "process", "process_id", "creating_process_id", "mode"), []string{"process", "process_id", "mode"},
nil, nil,
) )
c.pageFaultsTotal = prometheus.NewDesc( c.pageFaultsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_faults_total"), prometheus.BuildFQName(types.Namespace, Name, "page_faults_total"),
"Page faults by the threads executing in this process.", "Page faults by the threads executing in this process.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.pageFileBytes = prometheus.NewDesc( c.pageFileBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "page_file_bytes"), prometheus.BuildFQName(types.Namespace, Name, "page_file_bytes"),
"Current number of bytes this process has used in the paging file(s).", "Current number of bytes this process has used in the paging file(s).",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.poolBytes = prometheus.NewDesc( c.poolBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "pool_bytes"), prometheus.BuildFQName(types.Namespace, Name, "pool_bytes"),
"Pool Bytes is the last observed number of bytes in the paged or nonpaged pool.", "Pool Bytes is the last observed number of bytes in the paged or nonpaged pool.",
append(commonLabels, "process", "process_id", "creating_process_id", "pool"), []string{"process", "process_id", "pool"},
nil, nil,
) )
c.priorityBase = prometheus.NewDesc( c.priorityBase = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "priority_base"), prometheus.BuildFQName(types.Namespace, Name, "priority_base"),
"Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process.", "Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.privateBytes = prometheus.NewDesc( c.privateBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "private_bytes"), prometheus.BuildFQName(types.Namespace, Name, "private_bytes"),
"Current number of bytes this process has allocated that cannot be shared with other processes.", "Current number of bytes this process has allocated that cannot be shared with other processes.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.threadCount = prometheus.NewDesc( c.threadCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "threads"), prometheus.BuildFQName(types.Namespace, Name, "threads"),
"Number of threads currently active in this process.", "Number of threads currently active in this process.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.virtualBytes = prometheus.NewDesc( c.virtualBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "virtual_bytes"), prometheus.BuildFQName(types.Namespace, Name, "virtual_bytes"),
"Current size, in bytes, of the virtual address space that the process is using.", "Current size, in bytes, of the virtual address space that the process is using.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.workingSetPrivate = prometheus.NewDesc( c.workingSetPrivate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_private_bytes"), prometheus.BuildFQName(types.Namespace, Name, "working_set_private_bytes"),
"Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes.", "Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.workingSetPeak = prometheus.NewDesc( c.workingSetPeak = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_peak_bytes"), prometheus.BuildFQName(types.Namespace, Name, "working_set_peak_bytes"),
"Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process.", "Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
c.workingSet = prometheus.NewDesc( c.workingSet = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "working_set_bytes"), prometheus.BuildFQName(types.Namespace, Name, "working_set_bytes"),
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process.", "Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process.",
append(commonLabels, "process", "process_id", "creating_process_id"), []string{"process", "process_id"},
nil, nil,
) )
@@ -286,23 +285,21 @@ type WorkerProcess struct {
ProcessId uint64 ProcessId uint64
} }
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
data := make([]perflibProcess, 0) data := make([]perflibProcess, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Process"], &data, c.logger) err := perflib.UnmarshalObject(ctx.PerfObjects["Process"], &data, logger)
if err != nil { if err != nil {
return err return err
} }
var workerProcesses []WorkerProcess var workerProcesses []WorkerProcess
if c.config.EnableWorkerProcess { if c.config.EnableWorkerProcess {
queryWorkerProcess := wmi.QueryAllForClass(&workerProcesses, "WorkerProcess", c.logger) if err := c.wmiClient.Query("SELECT * FROM WorkerProcess", &workerProcesses, nil, "root\\WebAdministration"); err != nil {
if err := wmi.QueryNamespace(queryWorkerProcess, &workerProcesses, "root\\WebAdministration"); err != nil { _ = level.Debug(logger).Log("msg", "Could not query WebAdministration namespace for IIS worker processes", "err", err)
_ = level.Debug(c.logger).Log("msg", "Could not query WebAdministration namespace for IIS worker processes", "err", err)
} }
} }
var owner string
for _, process := range data { for _, process := range data {
if process.Name == "_Total" || if process.Name == "_Total" ||
c.config.ProcessExclude.MatchString(process.Name) || c.config.ProcessExclude.MatchString(process.Name) ||
@@ -324,164 +321,163 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
} }
} }
labels := make([]string, 0, 4) cmdLine, processOwner, processGroupID, err := c.getProcessInformation(logger, uint32(process.IDProcess))
if err != nil {
if c.config.EnableReportOwner { _ = level.Debug(logger).Log("msg", "Failed to get process information", "pid", pid, "err", err)
owner, err = c.getProcessOwner(int(process.IDProcess))
if err != nil {
owner = "unknown"
}
labels = []string{owner}
} }
labels = append(labels, processName, pid, parentPID) ch <- prometheus.MustNewConstMetric(
c.info,
prometheus.GaugeValue,
1.0,
processName, pid, parentPID, strconv.Itoa(int(processGroupID)), processOwner, cmdLine,
)
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.startTime, c.startTime,
prometheus.GaugeValue, prometheus.GaugeValue,
process.ElapsedTime, process.ElapsedTime,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.handleCount, c.handleCount,
prometheus.GaugeValue, prometheus.GaugeValue,
process.HandleCount, process.HandleCount,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.cpuTimeTotal, c.cpuTimeTotal,
prometheus.CounterValue, prometheus.CounterValue,
process.PercentPrivilegedTime, process.PercentPrivilegedTime,
append(labels, "privileged")..., processName, pid, "privileged",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.cpuTimeTotal, c.cpuTimeTotal,
prometheus.CounterValue, prometheus.CounterValue,
process.PercentUserTime, process.PercentUserTime,
append(labels, "user")..., processName, pid, "user",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.ioBytesTotal, c.ioBytesTotal,
prometheus.CounterValue, prometheus.CounterValue,
process.IOOtherBytesPerSec, process.IOOtherBytesPerSec,
append(labels, "other")..., processName, pid, "other",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.ioOperationsTotal, c.ioOperationsTotal,
prometheus.CounterValue, prometheus.CounterValue,
process.IOOtherOperationsPerSec, process.IOOtherOperationsPerSec,
append(labels, "other")..., processName, pid, "other",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.ioBytesTotal, c.ioBytesTotal,
prometheus.CounterValue, prometheus.CounterValue,
process.IOReadBytesPerSec, process.IOReadBytesPerSec,
append(labels, "read")..., processName, pid, "read",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.ioOperationsTotal, c.ioOperationsTotal,
prometheus.CounterValue, prometheus.CounterValue,
process.IOReadOperationsPerSec, process.IOReadOperationsPerSec,
append(labels, "read")..., processName, pid, "read",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.ioBytesTotal, c.ioBytesTotal,
prometheus.CounterValue, prometheus.CounterValue,
process.IOWriteBytesPerSec, process.IOWriteBytesPerSec,
append(labels, "write")..., processName, pid, "write",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.ioOperationsTotal, c.ioOperationsTotal,
prometheus.CounterValue, prometheus.CounterValue,
process.IOWriteOperationsPerSec, process.IOWriteOperationsPerSec,
append(labels, "write")..., processName, pid, "write",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.pageFaultsTotal, c.pageFaultsTotal,
prometheus.CounterValue, prometheus.CounterValue,
process.PageFaultsPerSec, process.PageFaultsPerSec,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.pageFileBytes, c.pageFileBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
process.PageFileBytes, process.PageFileBytes,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.poolBytes, c.poolBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
process.PoolNonPagedBytes, process.PoolNonPagedBytes,
append(labels, "nonpaged")..., processName, pid, "nonpaged",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.poolBytes, c.poolBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
process.PoolPagedBytes, process.PoolPagedBytes,
append(labels, "paged")..., processName, pid, "paged",
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.priorityBase, c.priorityBase,
prometheus.GaugeValue, prometheus.GaugeValue,
process.PriorityBase, process.PriorityBase,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.privateBytes, c.privateBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
process.PrivateBytes, process.PrivateBytes,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.threadCount, c.threadCount,
prometheus.GaugeValue, prometheus.GaugeValue,
process.ThreadCount, process.ThreadCount,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.virtualBytes, c.virtualBytes,
prometheus.GaugeValue, prometheus.GaugeValue,
process.VirtualBytes, process.VirtualBytes,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.workingSetPrivate, c.workingSetPrivate,
prometheus.GaugeValue, prometheus.GaugeValue,
process.WorkingSetPrivate, process.WorkingSetPrivate,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.workingSetPeak, c.workingSetPeak,
prometheus.GaugeValue, prometheus.GaugeValue,
process.WorkingSetPeak, process.WorkingSetPeak,
labels..., processName, pid,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.workingSet, c.workingSet,
prometheus.GaugeValue, prometheus.GaugeValue,
process.WorkingSet, process.WorkingSet,
labels..., processName, pid,
) )
} }
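
After this change the per-process series above carry only the process name and PID; owner, command line and group ID move to a single info metric emitted once per process. A rough sketch of that pattern (metric and label names are inferred for illustration, not copied from the exporter):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var (
	// High-cardinality, rarely-changing attributes ride on a constant 1-valued gauge...
	info = prometheus.NewDesc("windows_process_info", "Process information.",
		[]string{"process", "process_id", "owner", "cmdline"}, nil)
	// ...while frequently scraped series keep only the join keys.
	workingSet = prometheus.NewDesc("windows_process_working_set_bytes", "Working set in bytes.",
		[]string{"process", "process_id"}, nil)
)

func emit(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(info, prometheus.GaugeValue, 1,
		"svchost", "123", `NT AUTHORITY\SYSTEM`, `C:\Windows\system32\svchost.exe -k netsvcs`)
	ch <- prometheus.MustNewConstMetric(workingSet, prometheus.GaugeValue, 4.2e7,
		"svchost", "123")
}

func main() {
	ch := make(chan prometheus.Metric, 2)
	emit(ch)
	close(ch)
	for range ch {
	}
}
```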
@@ -489,39 +485,149 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
} }
// ref: https://github.com/microsoft/hcsshim/blob/8beabacfc2d21767a07c20f8dd5f9f3932dbf305/internal/uvm/stats.go#L25 // ref: https://github.com/microsoft/hcsshim/blob/8beabacfc2d21767a07c20f8dd5f9f3932dbf305/internal/uvm/stats.go#L25
func (c *Collector) getProcessOwner(pid int) (string, error) { func (c *Collector) getProcessInformation(logger log.Logger, pid uint32) (string, string, uint32, error) {
p, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid)) if pid == 0 {
if errors.Is(err, syscall.Errno(0x57)) { // invalid parameter, for PIDs that don't exist return "", "", 0, nil
return "", errors.New("process not found")
} }
hProcess, vmReadAccess, err := c.openProcess(pid)
if err != nil { if err != nil {
return "", fmt.Errorf("OpenProcess: %w", err) if errors.Is(err, windows.ERROR_ACCESS_DENIED) {
return "", "", 0, nil
}
return "", "", 0, err
} }
defer windows.Close(p) defer func(hProcess windows.Handle) {
if err := windows.CloseHandle(hProcess); err != nil {
_ = level.Warn(logger).Log("msg", "CloseHandle failed", "err", err)
}
}(hProcess)
owner, err := c.getProcessOwner(logger, hProcess)
if err != nil {
return "", "", 0, err
}
var (
cmdLine string
processGroupID uint32
)
if vmReadAccess {
cmdLine, processGroupID, err = c.getExtendedProcessInformation(hProcess)
if err != nil {
return "", owner, processGroupID, err
}
}
return cmdLine, owner, processGroupID, nil
}
func (c *Collector) getExtendedProcessInformation(hProcess windows.Handle) (string, uint32, error) {
// Get the process environment block (PEB) address
var pbi windows.PROCESS_BASIC_INFORMATION
retLen := uint32(unsafe.Sizeof(pbi))
if err := windows.NtQueryInformationProcess(hProcess, windows.ProcessBasicInformation, unsafe.Pointer(&pbi), retLen, &retLen); err != nil {
return "", 0, fmt.Errorf("failed to query process basic information: %w", err)
}
peb := windows.PEB{}
err := windows.ReadProcessMemory(hProcess,
uintptr(unsafe.Pointer(pbi.PebBaseAddress)),
(*byte)(unsafe.Pointer(&peb)),
unsafe.Sizeof(peb),
nil,
)
if err != nil {
return "", 0, fmt.Errorf("failed to read process memory: %w", err)
}
processParameters := windows.RTL_USER_PROCESS_PARAMETERS{}
err = windows.ReadProcessMemory(hProcess,
uintptr(unsafe.Pointer(peb.ProcessParameters)),
(*byte)(unsafe.Pointer(&processParameters)),
unsafe.Sizeof(processParameters),
nil,
)
if err != nil {
return "", 0, fmt.Errorf("failed to read process memory: %w", err)
}
cmdLineUTF16 := make([]uint16, processParameters.CommandLine.Length)
err = windows.ReadProcessMemory(hProcess,
uintptr(unsafe.Pointer(processParameters.CommandLine.Buffer)),
(*byte)(unsafe.Pointer(&cmdLineUTF16[0])),
uintptr(processParameters.CommandLine.Length),
nil,
)
if err != nil {
return "", processParameters.ProcessGroupId, fmt.Errorf("failed to read process memory: %w", err)
}
return strings.TrimSpace(windows.UTF16ToString(cmdLineUTF16)), processParameters.ProcessGroupId, nil
}
func (c *Collector) getProcessOwner(logger log.Logger, hProcess windows.Handle) (string, error) {
var tok windows.Token var tok windows.Token
if err = windows.OpenProcessToken(p, windows.TOKEN_QUERY, &tok); err != nil {
return "", fmt.Errorf("OpenProcessToken: %w", err) if err := windows.OpenProcessToken(hProcess, windows.TOKEN_QUERY, &tok); err != nil {
if errors.Is(err, windows.ERROR_ACCESS_DENIED) {
return "", nil
}
return "", fmt.Errorf("failed to open process token: %w", err)
} }
defer func(tok windows.Token) {
if err := tok.Close(); err != nil {
_ = level.Warn(logger).Log("msg", "Token close failed", "err", err)
}
}(tok)
tokenUser, err := tok.GetTokenUser() tokenUser, err := tok.GetTokenUser()
if err != nil { if err != nil {
return "", fmt.Errorf("GetTokenUser: %w", err) return "", fmt.Errorf("failed to get token user: %w", err)
} }
sid := tokenUser.User.Sid.String() sid := tokenUser.User.Sid.String()
if owner, ok := c.lookupCache[sid]; ok {
return owner, nil owner, ok := c.lookupCache[sid]
if !ok {
account, domain, _, err := tokenUser.User.Sid.LookupAccount("")
if err != nil {
owner = sid
} else {
owner = fmt.Sprintf(`%s\%s`, account, domain)
}
c.lookupCache[sid] = owner
} }
account, domain, _, err := tokenUser.User.Sid.LookupAccount("") return owner, nil
if err != nil { }
c.lookupCache[sid] = sid
} else { func (c *Collector) openProcess(pid uint32) (windows.Handle, bool, error) {
c.lookupCache[sid] = fmt.Sprintf(`%s\%s`, account, domain) // Open the process with QUERY_INFORMATION and VM_READ permissions
} hProcess, err := windows.OpenProcess(windows.PROCESS_QUERY_INFORMATION|windows.PROCESS_VM_READ, false, pid)
if err == nil {
return c.lookupCache[sid], nil return hProcess, true, nil
}
if !errors.Is(err, windows.ERROR_ACCESS_DENIED) {
return 0, false, fmt.Errorf("failed to open process: %w", err)
}
if errors.Is(err, syscall.Errno(0x57)) { // invalid parameter, for PIDs that don't exist
return 0, false, errors.New("process not found")
}
hProcess, err = windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
if err != nil {
return 0, false, fmt.Errorf("failed to open process with limited permissions: %w", err)
}
return hProcess, false, nil
} }
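
The two-tier open above trades detail for availability: full query plus VM-read access is attempted first, and on ERROR_ACCESS_DENIED the collector falls back to a limited-information handle (owner still readable, but no command line or group ID). A standalone sketch of the same strategy, tried against the System process (PID 4), which a non-elevated caller can usually only open with limited rights:

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/windows"
)

// open mirrors the fallback strategy above: full access if possible,
// otherwise a limited-information handle.
func open(pid uint32) (windows.Handle, bool, error) {
	h, err := windows.OpenProcess(windows.PROCESS_QUERY_INFORMATION|windows.PROCESS_VM_READ, false, pid)
	if err == nil {
		return h, true, nil
	}
	if !errors.Is(err, windows.ERROR_ACCESS_DENIED) {
		return 0, false, err
	}
	h, err = windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
	return h, false, err
}

func main() {
	h, vmRead, err := open(4)
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer windows.CloseHandle(h)
	fmt.Println("got VM read access:", vmRead)
}
```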


@@ -114,7 +114,7 @@ func (coll *Prometheus) Collect(ch chan<- prometheus.Metric) {
for name, c := range coll.collectors.collectors { for name, c := range coll.collectors.collectors {
go func(name string, c Collector) { go func(name string, c Collector) {
defer wg.Done() defer wg.Done()
outcome := coll.execute(name, c, scrapeContext, metricsBuffer) outcome := coll.execute(coll.logger, name, c, scrapeContext, metricsBuffer)
l.Lock() l.Lock()
if !finished { if !finished {
collectorOutcomes[name] = outcome collectorOutcomes[name] = outcome
@@ -171,9 +171,9 @@ func (coll *Prometheus) Collect(ch chan<- prometheus.Metric) {
l.Unlock() l.Unlock()
} }
func (coll *Prometheus) execute(name string, c Collector, ctx *types.ScrapeContext, ch chan<- prometheus.Metric) collectorOutcome { func (coll *Prometheus) execute(logger log.Logger, name string, c Collector, ctx *types.ScrapeContext, ch chan<- prometheus.Metric) collectorOutcome {
t := time.Now() t := time.Now()
err := c.Collect(ctx, ch) err := c.Collect(ctx, logger, ch)
duration := time.Since(t).Seconds() duration := time.Since(t).Seconds()
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
coll.scrapeDurationDesc, coll.scrapeDurationDesc,
@@ -186,6 +186,7 @@ func (coll *Prometheus) execute(name string, c Collector, ctx *types.ScrapeConte
_ = level.Error(coll.logger).Log("msg", fmt.Sprintf("collector %s failed after %fs", name, duration), "err", err) _ = level.Error(coll.logger).Log("msg", fmt.Sprintf("collector %s failed after %fs", name, duration), "err", err)
return failed return failed
} }
_ = level.Debug(coll.logger).Log("msg", fmt.Sprintf("collector %s succeeded after %fs.", name, duration)) _ = level.Debug(coll.logger).Log("msg", fmt.Sprintf("collector %s succeeded after %fs.", name, duration))
return success return success
} }


@@ -12,6 +12,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "remote_fx" const Name = "remote_fx"
@@ -27,7 +28,6 @@ var ConfigDefaults = Config{}
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxgraphics/ // https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxgraphics/
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
// net // net
baseTCPRTT *prometheus.Desc baseTCPRTT *prometheus.Desc
@@ -54,7 +54,7 @@ type Collector struct {
sourceFramesPerSecond *prometheus.Desc sourceFramesPerSecond *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -63,8 +63,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -76,11 +74,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"RemoteFX Network", "RemoteFX Graphics"}, nil return []string{"RemoteFX Network", "RemoteFX Graphics"}, nil
} }
@@ -88,7 +82,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(log.Logger, *wmi.Client) error {
// net // net
c.baseTCPRTT = prometheus.NewDesc( c.baseTCPRTT = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"), prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"),
@@ -217,13 +211,14 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collectRemoteFXNetworkCount(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session count metrics", "err", err) if err := c.collectRemoteFXNetworkCount(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting terminal services session count metrics", "err", err)
return err return err
} }
if err := c.collectRemoteFXGraphicsCounters(ctx, ch); err != nil { if err := c.collectRemoteFXGraphicsCounters(ctx, logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session count metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting terminal services session count metrics", "err", err)
return err return err
} }
return nil return nil
@@ -246,9 +241,10 @@ type perflibRemoteFxNetwork struct {
RetransmissionRate float64 `perflib:"Percentage of packets that have been retransmitted"` RetransmissionRate float64 `perflib:"Percentage of packets that have been retransmitted"`
} }
func (c *Collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
dst := make([]perflibRemoteFxNetwork, 0) dst := make([]perflibRemoteFxNetwork, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Network"], &dst, c.logger) err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Network"], &dst, logger)
if err != nil { if err != nil {
return err return err
} }
@@ -356,9 +352,10 @@ type perflibRemoteFxGraphics struct {
SourceFramesPerSecond float64 `perflib:"Source Frames/Second"` SourceFramesPerSecond float64 `perflib:"Source Frames/Second"`
} }
func (c *Collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
dst := make([]perflibRemoteFxGraphics, 0) dst := make([]perflibRemoteFxGraphics, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Graphics"], &dst, c.logger) err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Graphics"], &dst, logger)
if err != nil { if err != nil {
return err return err
} }


@@ -16,6 +16,7 @@ import (
"github.com/go-ole/go-ole/oleutil" "github.com/go-ole/go-ole/oleutil"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "scheduled_task" const Name = "scheduled_task"
@@ -32,7 +33,6 @@ var ConfigDefaults = Config{
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
lastResult *prometheus.Desc lastResult *prometheus.Desc
missedRuns *prometheus.Desc missedRuns *prometheus.Desc
@@ -69,7 +69,7 @@ type ScheduledTask struct {
type ScheduledTasks []ScheduledTask type ScheduledTasks []ScheduledTask
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -86,8 +86,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -131,11 +129,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -143,7 +137,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.lastResult = prometheus.NewDesc( c.lastResult = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "last_result"), prometheus.BuildFQName(types.Namespace, Name, "last_result"),
"The result that was returned the last time the registered task was run", "The result that was returned the last time the registered task was run",
@@ -168,9 +162,10 @@ func (c *Collector) Build() error {
return nil return nil
} }
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting user metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting user metrics", "err", err)
return err return err
} }


@@ -15,8 +15,8 @@ import (
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/pkg/utils"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc/mgr" "golang.org/x/sys/windows/svc/mgr"
) )
@@ -37,12 +37,12 @@ var ConfigDefaults = Config{
// A Collector is a Prometheus Collector for WMI Win32_Service metrics. // A Collector is a Prometheus Collector for WMI Win32_Service metrics.
type Collector struct { type Collector struct {
logger log.Logger
serviceWhereClause *string serviceWhereClause *string
useAPI *bool useAPI *bool
v2 *bool v2 *bool
wmiClient *wmi.Client
Information *prometheus.Desc Information *prometheus.Desc
State *prometheus.Desc State *prometheus.Desc
StartMode *prometheus.Desc StartMode *prometheus.Desc
@@ -50,7 +50,7 @@ type Collector struct {
StateV2 *prometheus.Desc StateV2 *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -58,8 +58,8 @@ func New(logger log.Logger, config *Config) *Collector {
c := &Collector{ c := &Collector{
serviceWhereClause: &config.ServiceWhereClause, serviceWhereClause: &config.ServiceWhereClause,
useAPI: &config.UseAPI, useAPI: &config.UseAPI,
v2: &config.V2,
} }
c.SetLogger(logger)
return c return c
} }
@@ -85,11 +85,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -97,12 +93,20 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
logger = log.With(logger, "collector", Name)
if utils.IsEmpty(c.serviceWhereClause) { if utils.IsEmpty(c.serviceWhereClause) {
_ = level.Warn(c.logger).Log("msg", "No where-clause specified for service collector. This will generate a very large number of metrics!") _ = level.Warn(logger).Log("msg", "No where-clause specified for service collector. This will generate a very large number of metrics!")
} }
if *c.useAPI { if *c.useAPI {
_ = level.Warn(c.logger).Log("msg", "API collection is enabled.") _ = level.Warn(logger).Log("msg", "API collection is enabled.")
} }
c.Information = prometheus.NewDesc( c.Information = prometheus.NewDesc(
@@ -141,21 +145,22 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var err error var err error
switch { switch {
case *c.useAPI: case *c.useAPI:
if err = c.collectAPI(ch); err != nil { if err = c.collectAPI(logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting API service metrics:", "err", err) _ = level.Error(logger).Log("msg", "failed collecting API service metrics:", "err", err)
} }
case *c.v2: case *c.v2:
if err = c.collectAPIV2(ch); err != nil { if err = c.collectAPIV2(logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting API service metrics:", "err", err) _ = level.Error(logger).Log("msg", "failed collecting API service metrics:", "err", err)
} }
default: default:
if err = c.collectWMI(ch); err != nil { if err = c.collectWMI(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting WMI service metrics:", "err", err) _ = level.Error(logger).Log("msg", "failed collecting WMI service metrics:", "err", err)
} }
} }
@@ -226,8 +231,13 @@ var (
func (c *Collector) collectWMI(ch chan<- prometheus.Metric) error { func (c *Collector) collectWMI(ch chan<- prometheus.Metric) error {
var dst []Win32_Service var dst []Win32_Service
q := wmi.QueryAllWhere(&dst, *c.serviceWhereClause, c.logger) //nolint:staticcheck query := "SELECT * FROM Win32_Service"
if err := wmi.Query(q, &dst); err != nil {
if *c.serviceWhereClause != "" {
query += " WHERE " + *c.serviceWhereClause
}
if err := c.wmiClient.Query(query, &dst); err != nil {
return err return err
} }
for _, service := range dst { for _, service := range dst {
@@ -292,7 +302,7 @@ func (c *Collector) collectWMI(ch chan<- prometheus.Metric) error {
return nil return nil
} }
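
The WMI path above now builds its WQL by hand: the base query is `SELECT * FROM Win32_Service`, and the configured where-clause is appended only when it is non-empty. A trivial illustration (the clause value is an example, not a recommendation):

```go
package main

import "fmt"

func main() {
	whereClause := "StartMode = 'Auto'" // example value for the configured where-clause
	query := "SELECT * FROM Win32_Service"
	if whereClause != "" {
		query += " WHERE " + whereClause
	}
	fmt.Println(query) // SELECT * FROM Win32_Service WHERE StartMode = 'Auto'
}
```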
func (c *Collector) collectAPI(ch chan<- prometheus.Metric) error { func (c *Collector) collectAPI(logger log.Logger, ch chan<- prometheus.Metric) error {
svcmgrConnection, err := mgr.Connect() svcmgrConnection, err := mgr.Connect()
if err != nil { if err != nil {
return err return err
@@ -311,14 +321,14 @@ func (c *Collector) collectAPI(ch chan<- prometheus.Metric) error {
// Get UTF16 service name. // Get UTF16 service name.
serviceName, err := syscall.UTF16PtrFromString(service) serviceName, err := syscall.UTF16PtrFromString(service)
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Service %s get name error: %#v", service, err)) _ = level.Warn(logger).Log("msg", fmt.Sprintf("Service %s get name error: %#v", service, err))
return return
} }
// Open connection for service handler. // Open connection for service handler.
serviceHandle, err := windows.OpenService(svcmgrConnection.Handle, serviceName, windows.GENERIC_READ) serviceHandle, err := windows.OpenService(svcmgrConnection.Handle, serviceName, windows.GENERIC_READ)
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Open service %s error: %#v", service, err)) _ = level.Warn(logger).Log("msg", fmt.Sprintf("Open service %s error: %#v", service, err))
return return
} }
@@ -329,14 +339,14 @@ func (c *Collector) collectAPI(ch chan<- prometheus.Metric) error {
// Get Service Configuration. // Get Service Configuration.
serviceConfig, err := serviceManager.Config() serviceConfig, err := serviceManager.Config()
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Get service %s config error: %#v", service, err)) _ = level.Warn(logger).Log("msg", fmt.Sprintf("Get service %s config error: %#v", service, err))
return return
} }
// Get Service Current Status. // Get Service Current Status.
serviceStatus, err := serviceManager.Query() serviceStatus, err := serviceManager.Query()
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Get service %s status error: %#v", service, err)) _ = level.Warn(logger).Log("msg", fmt.Sprintf("Get service %s status error: %#v", service, err))
return return
} }
@@ -384,15 +394,15 @@ func (c *Collector) collectAPI(ch chan<- prometheus.Metric) error {
return nil return nil
} }
func (c *Collector) collectAPIV2(ch chan<- prometheus.Metric) error { func (c *Collector) collectAPIV2(logger log.Logger, ch chan<- prometheus.Metric) error {
services, err := c.queryAllServiceStates() services, err := c.queryAllServiceStates(logger)
if err != nil { if err != nil {
_ = level.Warn(c.logger).Log("msg", "Failed to query services", "err", err) _ = level.Warn(logger).Log("msg", "Failed to query services", "err", err)
return err return err
} }
if services == nil { if services == nil {
_ = level.Warn(c.logger).Log("msg", "No services queried") _ = level.Warn(logger).Log("msg", "No services queried")
return nil return nil
} }
@@ -428,7 +438,7 @@ func (c *Collector) collectAPIV2(ch chan<- prometheus.Metric) error {
// Copyright 2016-present Datadog, Inc. // Copyright 2016-present Datadog, Inc.
// //
// Source: https://github.com/DataDog/datadog-agent/blob/afbd8b6c87939c92610c654cb07fdfd439e4fb27/pkg/util/winutil/scmmonitor.go#L61-L96 // Source: https://github.com/DataDog/datadog-agent/blob/afbd8b6c87939c92610c654cb07fdfd439e4fb27/pkg/util/winutil/scmmonitor.go#L61-L96
func (c *Collector) queryAllServiceStates() ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) { func (c *Collector) queryAllServiceStates(logger log.Logger) ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) {
// EnumServiceStatusEx requires only SC_MANAGER_ENUM_SERVICE. // EnumServiceStatusEx requires only SC_MANAGER_ENUM_SERVICE.
h, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_ENUMERATE_SERVICE) h, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_ENUMERATE_SERVICE)
if err != nil { if err != nil {
@@ -438,7 +448,7 @@ func (c *Collector) queryAllServiceStates() ([]windows.ENUM_SERVICE_STATUS_PROCE
m := &mgr.Mgr{Handle: h} m := &mgr.Mgr{Handle: h}
defer func() { defer func() {
if err := m.Disconnect(); err != nil { if err := m.Disconnect(); err != nil {
_ = level.Warn(c.logger).Log("msg", "Failed to disconnect from scm", "err", err) _ = level.Warn(logger).Log("msg", "Failed to disconnect from scm", "err", err)
} }
}() }()


@@ -11,6 +11,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "smb" const Name = "smb"
@@ -21,13 +22,12 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
treeConnectCount *prometheus.Desc treeConnectCount *prometheus.Desc
currentOpenFileCount *prometheus.Desc currentOpenFileCount *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -36,8 +36,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -49,11 +47,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{ return []string{
"SMB Server Shares", "SMB Server Shares",
}, nil }, nil
@@ -63,7 +57,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
// desc creates a new prometheus description // desc creates a new prometheus description
desc := func(metricName string, description string, labels ...string) *prometheus.Desc { desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
return prometheus.NewDesc( return prometheus.NewDesc(
@@ -81,9 +75,10 @@ func (c *Collector) Build() error {
} }
// Collect collects smb metrics and sends them to prometheus. // Collect collects smb metrics and sends them to prometheus.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collectServerShares(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed to collect server share metrics", "err", err) if err := c.collectServerShares(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed to collect server share metrics", "err", err)
return err return err
} }
@@ -99,9 +94,10 @@ type perflibServerShares struct {
TreeConnectCount float64 `perflib:"Tree Connect Count"` TreeConnectCount float64 `perflib:"Tree Connect Count"`
} }
func (c *Collector) collectServerShares(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectServerShares(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibServerShares var data []perflibServerShares
if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Server Shares"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Server Shares"], &data, logger); err != nil {
return err return err
} }
for _, instance := range data { for _, instance := range data {
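
The perflib-based collectors in the rest of this diff all follow the shape visible here: a plain struct whose `perflib:"..."` tags name the Windows performance counters, unmarshalled from the objects pre-scraped into the ScrapeContext. A compilable sketch of that pattern (the `Name` field receiving the instance name is an assumption based on the exporter's usual convention):

```go
package smb

import (
	"github.com/go-kit/log"
	"github.com/prometheus-community/windows_exporter/pkg/perflib"
	"github.com/prometheus-community/windows_exporter/pkg/types"
)

// serverShareCounters maps an "SMB Server Shares" counter; the tag must match
// the counter name exactly as exposed by perflib.
type serverShareCounters struct {
	Name             string  // instance name, i.e. the share (assumed convention)
	TreeConnectCount float64 `perflib:"Tree Connect Count"`
}

func readServerShares(ctx *types.ScrapeContext, logger log.Logger) ([]serverShareCounters, error) {
	var data []serverShareCounters
	if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Server Shares"], &data, logger); err != nil {
		return nil, err
	}
	return data, nil
}
```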


@@ -11,6 +11,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const ( const (
@@ -23,7 +24,6 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
readBytesTotal *prometheus.Desc readBytesTotal *prometheus.Desc
readBytesTransmittedViaSMBDirectTotal *prometheus.Desc readBytesTransmittedViaSMBDirectTotal *prometheus.Desc
@@ -49,7 +49,7 @@ type Collector struct {
requestSecs *prometheus.Desc requestSecs *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -58,8 +58,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -71,11 +69,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{ return []string{
"SMB Client Shares", "SMB Client Shares",
}, nil }, nil
@@ -85,7 +79,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
// desc creates a new prometheus description // desc creates a new prometheus description
desc := func(metricName string, description string, labels []string) *prometheus.Desc { desc := func(metricName string, description string, labels []string) *prometheus.Desc {
return prometheus.NewDesc( return prometheus.NewDesc(
@@ -185,9 +179,10 @@ func (c *Collector) Build() error {
} }
// Collect collects smb client metrics and sends them to prometheus. // Collect collects smb client metrics and sends them to prometheus.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collectClientShares(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "Error in ClientShares", "err", err) if err := c.collectClientShares(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "Error in ClientShares", "err", err)
return err return err
} }
@@ -221,9 +216,10 @@ type perflibClientShares struct {
WriteRequestsPerSec float64 `perflib:"Write Requests/sec"` WriteRequestsPerSec float64 `perflib:"Write Requests/sec"`
} }
func (c *Collector) collectClientShares(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectClientShares(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var data []perflibClientShares var data []perflibClientShares
if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Client Shares"], &data, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Client Shares"], &data, logger); err != nil {
return err return err
} }
for _, instance := range data { for _, instance := range data {


@@ -12,6 +12,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "smtp" const Name = "smtp"
@@ -28,7 +29,6 @@ var ConfigDefaults = Config{
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
badMailedMessagesBadPickupFileTotal *prometheus.Desc badMailedMessagesBadPickupFileTotal *prometheus.Desc
badMailedMessagesGeneralFailureTotal *prometheus.Desc badMailedMessagesGeneralFailureTotal *prometheus.Desc
@@ -74,7 +74,7 @@ type Collector struct {
routingTableLookupsTotal *prometheus.Desc routingTableLookupsTotal *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -91,8 +91,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -136,11 +134,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"SMTP Server"}, nil return []string{"SMTP Server"}, nil
} }
@@ -148,8 +142,10 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, _ *wmi.Client) error {
_ = level.Info(c.logger).Log("msg", "smtp collector is in an experimental state! Metrics for this collector have not been tested.") logger = log.With(logger, "collector", Name)
_ = level.Info(logger).Log("msg", "smtp collector is in an experimental state! Metrics for this collector have not been tested.")
c.badMailedMessagesBadPickupFileTotal = prometheus.NewDesc( c.badMailedMessagesBadPickupFileTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "badmailed_messages_bad_pickup_file_total"), prometheus.BuildFQName(types.Namespace, Name, "badmailed_messages_bad_pickup_file_total"),
@@ -409,9 +405,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting smtp metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting smtp metrics", "err", err)
return err return err
} }
return nil return nil
@@ -465,9 +462,10 @@ type PerflibSMTPServer struct {
RoutingTableLookupsTotal float64 `perflib:"Routing Table Lookups Total"` RoutingTableLookupsTotal float64 `perflib:"Routing Table Lookups Total"`
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []PerflibSMTPServer var dst []PerflibSMTPServer
if err := perflib.UnmarshalObject(ctx.PerfObjects["SMTP Server"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["SMTP Server"], &dst, logger); err != nil {
return err return err
} }


@@ -9,6 +9,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "system" const Name = "system"
@@ -20,7 +21,6 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI metrics. // A Collector is a Prometheus Collector for WMI metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
contextSwitchesTotal *prometheus.Desc contextSwitchesTotal *prometheus.Desc
exceptionDispatchesTotal *prometheus.Desc exceptionDispatchesTotal *prometheus.Desc
@@ -30,7 +30,7 @@ type Collector struct {
threads *prometheus.Desc threads *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -39,8 +39,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -52,11 +50,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"System"}, nil return []string{"System"}, nil
} }
@@ -64,7 +58,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.contextSwitchesTotal = prometheus.NewDesc( c.contextSwitchesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "context_switches_total"), prometheus.BuildFQName(types.Namespace, Name, "context_switches_total"),
"Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)", "Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)",
@@ -106,9 +100,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting system metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting system metrics", "err", err)
return err return err
} }
return nil return nil
@@ -125,9 +120,10 @@ type system struct {
Threads float64 `perflib:"Threads"` Threads float64 `perflib:"Threads"`
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []system var dst []system
if err := perflib.UnmarshalObject(ctx.PerfObjects["System"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["System"], &dst, logger); err != nil {
return err return err
} }


@@ -9,6 +9,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "tcp" const Name = "tcp"
@@ -20,7 +21,6 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_Tcpip_TCPv{4,6} metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_Tcpip_TCPv{4,6} metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
connectionFailures *prometheus.Desc connectionFailures *prometheus.Desc
connectionsActive *prometheus.Desc connectionsActive *prometheus.Desc
@@ -33,7 +33,7 @@ type Collector struct {
segmentsSentTotal *prometheus.Desc segmentsSentTotal *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -42,8 +42,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -55,11 +53,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"TCPv4"}, nil return []string{"TCPv4"}, nil
} }
@@ -67,7 +61,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
c.connectionFailures = prometheus.NewDesc( c.connectionFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_failures_total"), prometheus.BuildFQName(types.Namespace, Name, "connection_failures_total"),
"(TCP.ConnectionFailures)", "(TCP.ConnectionFailures)",
@@ -127,9 +121,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting tcp metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting tcp metrics", "err", err)
return err return err
} }
return nil return nil
@@ -207,11 +202,12 @@ func writeTCPCounters(metrics tcp, labels []string, c *Collector, ch chan<- prom
) )
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []tcp var dst []tcp
// TCPv4 counters // TCPv4 counters
if err := perflib.UnmarshalObject(ctx.PerfObjects["TCPv4"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["TCPv4"], &dst, logger); err != nil {
return err return err
} }
if len(dst) != 0 { if len(dst) != 0 {
@@ -219,7 +215,7 @@ func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metri
} }
// TCPv6 counters // TCPv6 counters
if err := perflib.UnmarshalObject(ctx.PerfObjects["TCPv6"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["TCPv6"], &dst, logger); err != nil {
return err return err
} }
if len(dst) != 0 { if len(dst) != 0 {


@@ -9,8 +9,8 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "teradici_pcoip" const Name = "teradici_pcoip"
@@ -26,8 +26,8 @@ var ConfigDefaults = Config{}
// win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics // win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics
// win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics. // win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
audioBytesReceived *prometheus.Desc audioBytesReceived *prometheus.Desc
audioBytesSent *prometheus.Desc audioBytesSent *prometheus.Desc
@@ -72,7 +72,7 @@ type Collector struct {
usbTXBWKBitPerSec *prometheus.Desc usbTXBWKBitPerSec *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -81,8 +81,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -94,11 +92,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -106,8 +100,15 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, wmiClient *wmi.Client) error {
_ = level.Warn(c.logger).Log("msg", "teradici_pcoip collector is deprecated and will be removed in the future.") _ = level.Warn(logger).
Log("msg", "teradici_pcoip collector is deprecated and will be removed in the future.", "collector", Name)
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.audioBytesReceived = prometheus.NewDesc( c.audioBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_bytes_received_total"), prometheus.BuildFQName(types.Namespace, Name, "audio_bytes_received_total"),
@@ -340,25 +341,26 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collectAudio(ch); err != nil { if err := c.collectAudio(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session audio metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting teradici session audio metrics", "err", err)
return err return err
} }
if err := c.collectGeneral(ch); err != nil { if err := c.collectGeneral(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session general metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting teradici session general metrics", "err", err)
return err return err
} }
if err := c.collectImaging(ch); err != nil { if err := c.collectImaging(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session imaging metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting teradici session imaging metrics", "err", err)
return err return err
} }
if err := c.collectNetwork(ch); err != nil { if err := c.collectNetwork(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session network metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting teradici session network metrics", "err", err)
return err return err
} }
if err := c.collectUsb(ch); err != nil { if err := c.collectUsb(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting teradici session USB metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting teradici session USB metrics", "err", err)
return err return err
} }
return nil return nil
@@ -418,8 +420,7 @@ type win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics struct {
func (c *Collector) collectAudio(ch chan<- prometheus.Metric) error { func (c *Collector) collectAudio(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionAudioStatistics var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionAudioStatistics
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_TeradiciPerf_PCoIPSessionAudioStatistics", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {
@@ -461,8 +462,7 @@ func (c *Collector) collectAudio(ch chan<- prometheus.Metric) error {
func (c *Collector) collectGeneral(ch chan<- prometheus.Metric) error { func (c *Collector) collectGeneral(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionGeneralStatistics var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionGeneralStatistics
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_TeradiciPerf_PCoIPSessionGeneralStatistics", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {
@@ -516,8 +516,7 @@ func (c *Collector) collectGeneral(ch chan<- prometheus.Metric) error {
func (c *Collector) collectImaging(ch chan<- prometheus.Metric) error { func (c *Collector) collectImaging(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionImagingStatistics var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionImagingStatistics
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_TeradiciPerf_PCoIPSessionImagingStatistics", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {
@@ -595,8 +594,7 @@ func (c *Collector) collectImaging(ch chan<- prometheus.Metric) error {
func (c *Collector) collectNetwork(ch chan<- prometheus.Metric) error { func (c *Collector) collectNetwork(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_TeradiciPerf_PCoIPSessionNetworkStatistics", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {
@@ -668,8 +666,7 @@ func (c *Collector) collectNetwork(ch chan<- prometheus.Metric) error {
func (c *Collector) collectUsb(ch chan<- prometheus.Metric) error { func (c *Collector) collectUsb(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics var dst []win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_TeradiciPerf_PCoIPSessionUsbStatistics", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {


@@ -5,6 +5,7 @@ package terminal_services
import ( import (
"errors" "errors"
"fmt" "fmt"
"strconv"
"strings" "strings"
"syscall" "syscall"
@@ -14,8 +15,8 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/headers/wtsapi32" "github.com/prometheus-community/windows_exporter/pkg/headers/wtsapi32"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const ( const (
@@ -31,10 +32,9 @@ type Win32_ServerFeature struct {
ID uint32 ID uint32
} }
func isConnectionBrokerServer(logger log.Logger) bool { func isConnectionBrokerServer(logger log.Logger, wmiClient *wmi.Client) bool {
var dst []Win32_ServerFeature var dst []Win32_ServerFeature
q := wmi.QueryAll(&dst, logger) if err := wmiClient.Query("SELECT * FROM Win32_ServerFeature", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return false return false
} }
for _, d := range dst { for _, d := range dst {
@@ -52,7 +52,6 @@ func isConnectionBrokerServer(logger log.Logger) bool {
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_localsessionmanager_terminalservices/ // https://wutils.com/wmi/root/cimv2/win32_perfrawdata_localsessionmanager_terminalservices/
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
connectionBrokerEnabled bool connectionBrokerEnabled bool
@@ -75,7 +74,7 @@ type Collector struct {
workingSetPeak *prometheus.Desc workingSetPeak *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -84,8 +83,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -97,11 +94,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{ return []string{
"Terminal Services Session", "Terminal Services Session",
"Remote Desktop Connection Broker Counterset", "Remote Desktop Connection Broker Counterset",
@@ -117,13 +110,15 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, wmiClient *wmi.Client) error {
c.connectionBrokerEnabled = isConnectionBrokerServer(c.logger) logger = log.With(logger, "collector", Name)
c.connectionBrokerEnabled = isConnectionBrokerServer(logger, wmiClient)
c.sessionInfo = prometheus.NewDesc( c.sessionInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "session_info"), prometheus.BuildFQName(types.Namespace, Name, "session_info"),
"Terminal Services sessions info", "Terminal Services sessions info",
[]string{"session_name", "user", "host", "state"}, []string{"session_name", "user", "host", "state", "id"},
nil, nil,
) )
c.connectionBrokerPerformance = prometheus.NewDesc( c.connectionBrokerPerformance = prometheus.NewDesc(
@@ -223,20 +218,21 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collectWTSSessions(ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session infos", "err", err) if err := c.collectWTSSessions(logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting terminal services session infos", "err", err)
return err return err
} }
if err := c.collectTSSessionCounters(ctx, ch); err != nil { if err := c.collectTSSessionCounters(ctx, logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting terminal services session count metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting terminal services session count metrics", "err", err)
return err return err
} }
// only collect CollectionBrokerPerformance if host is a Connection Broker // only collect CollectionBrokerPerformance if host is a Connection Broker
if c.connectionBrokerEnabled { if c.connectionBrokerEnabled {
if err := c.collectCollectionBrokerPerformanceCounter(ctx, ch); err != nil { if err := c.collectCollectionBrokerPerformanceCounter(ctx, logger, ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting Connection Broker performance metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting Connection Broker performance metrics", "err", err)
return err return err
} }
} }
@@ -262,9 +258,10 @@ type perflibTerminalServicesSession struct {
WorkingSetPeak float64 `perflib:"Working Set Peak"` WorkingSetPeak float64 `perflib:"Working Set Peak"`
} }
func (c *Collector) collectTSSessionCounters(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectTSSessionCounters(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
dst := make([]perflibTerminalServicesSession, 0) dst := make([]perflibTerminalServicesSession, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Terminal Services Session"], &dst, c.logger) err := perflib.UnmarshalObject(ctx.PerfObjects["Terminal Services Session"], &dst, logger)
if err != nil { if err != nil {
return err return err
} }
@@ -385,9 +382,10 @@ type perflibRemoteDesktopConnectionBrokerCounterset struct {
FailedConnections float64 `perflib:"Failed Connections"` FailedConnections float64 `perflib:"Failed Connections"`
} }
func (c *Collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
dst := make([]perflibRemoteDesktopConnectionBrokerCounterset, 0) dst := make([]perflibRemoteDesktopConnectionBrokerCounterset, 0)
err := perflib.UnmarshalObject(ctx.PerfObjects["Remote Desktop Connection Broker Counterset"], &dst, c.logger) err := perflib.UnmarshalObject(ctx.PerfObjects["Remote Desktop Connection Broker Counterset"], &dst, logger)
if err != nil { if err != nil {
return err return err
} }
@@ -419,8 +417,8 @@ func (c *Collector) collectCollectionBrokerPerformanceCounter(ctx *types.ScrapeC
return nil return nil
} }
func (c *Collector) collectWTSSessions(ch chan<- prometheus.Metric) error { func (c *Collector) collectWTSSessions(logger log.Logger, ch chan<- prometheus.Metric) error {
sessions, err := wtsapi32.WTSEnumerateSessionsEx(c.hServer, c.logger) sessions, err := wtsapi32.WTSEnumerateSessionsEx(c.hServer, logger)
if err != nil { if err != nil {
return fmt.Errorf("failed to enumerate WTS sessions: %w", err) return fmt.Errorf("failed to enumerate WTS sessions: %w", err)
} }
@@ -445,6 +443,7 @@ func (c *Collector) collectWTSSessions(ch chan<- prometheus.Metric) error {
userName, userName,
session.HostName, session.HostName,
stateName, stateName,
strconv.Itoa(int(session.SessionID)),
) )
} }
} }
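A note on the session_info change above: the new id label is what keeps two otherwise identical sessions (same user, host and state) from collapsing into one series. A minimal, self-contained sketch of emitting such a per-session info metric with client_golang — the label set is copied from the diff, while the sample values and the main wrapper are invented for illustration:

```
package main

import (
	"fmt"
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// sessionInfo mirrors the label set used in the diff above.
var sessionInfo = prometheus.NewDesc(
	prometheus.BuildFQName("windows", "terminal_services", "session_info"),
	"Terminal Services sessions info",
	[]string{"session_name", "user", "host", "state", "id"},
	nil,
)

func main() {
	// Two sessions for the same user and state: only the id label keeps the series distinct.
	for _, id := range []uint32{2, 3} {
		m := prometheus.MustNewConstMetric(
			sessionInfo, prometheus.GaugeValue, 1,
			"RDP-Tcp#0", "CONTOSO\\alice", "host01", "disconnected",
			strconv.Itoa(int(id)),
		)
		var out dto.Metric
		if err := m.Write(&out); err != nil {
			panic(err)
		}
		fmt.Println(out.String())
	}
}
```

Running it prints two samples that differ only in the id label, which is exactly what prevents the duplicate-metric errors the earlier commit addressed.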
@@ -34,6 +34,7 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt" "github.com/prometheus/common/expfmt"
"github.com/yusufpapurcu/wmi"
) )
const Name = "textfile" const Name = "textfile"
@@ -48,7 +49,6 @@ var ConfigDefaults = Config{
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
// Only set for testing to get predictable output. // Only set for testing to get predictable output.
mTime *float64 mTime *float64
@@ -56,7 +56,7 @@ type Collector struct {
mTimeDesc *prometheus.Desc mTimeDesc *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -69,8 +69,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -99,11 +97,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -111,9 +105,9 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, _ *wmi.Client) error {
_ = level.Info(c.logger). _ = level.Info(logger).
Log("msg", "textfile Collector directories: "+strings.Join(c.config.TextFileDirectories, ",")) Log("msg", "textfile Collector directories: "+strings.Join(c.config.TextFileDirectories, ","), "collector", Name)
c.mTimeDesc = prometheus.NewDesc( c.mTimeDesc = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, "textfile", "mtime_seconds"), prometheus.BuildFQName(types.Namespace, "textfile", "mtime_seconds"),
@@ -150,7 +144,7 @@ func duplicateMetricEntry(metricFamilies []*dto.MetricFamily) bool {
return false return false
} }
func (c *Collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) { func (c *Collector) convertMetricFamily(logger log.Logger, metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) {
var valType prometheus.ValueType var valType prometheus.ValueType
var val float64 var val float64
@@ -166,7 +160,7 @@ func (c *Collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<
for _, metric := range metricFamily.GetMetric() { for _, metric := range metricFamily.GetMetric() {
if metric.TimestampMs != nil { if metric.TimestampMs != nil {
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Ignoring unsupported custom timestamp on textfile Collector metric %v", metric)) _ = level.Warn(logger).Log("msg", fmt.Sprintf("Ignoring unsupported custom timestamp on textfile Collector metric %v", metric))
} }
labels := metric.GetLabel() labels := metric.GetLabel()
@@ -236,7 +230,7 @@ func (c *Collector) convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<
buckets, values..., buckets, values...,
) )
default: default:
_ = level.Error(c.logger).Log("msg", "unknown metric type for file") _ = level.Error(logger).Log("msg", "unknown metric type for file")
continue continue
} }
if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED { if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED {
@@ -297,7 +291,8 @@ func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
} }
// Collect implements the Collector interface. // Collect implements the Collector interface.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
errorMetric := 0.0 errorMetric := 0.0
mTimes := map[string]time.Time{} mTimes := map[string]time.Time{}
@@ -310,26 +305,26 @@ func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
for _, directory := range c.config.TextFileDirectories { for _, directory := range c.config.TextFileDirectories {
err := filepath.WalkDir(directory, func(path string, dirEntry os.DirEntry, err error) error { err := filepath.WalkDir(directory, func(path string, dirEntry os.DirEntry, err error) error {
if err != nil { if err != nil {
_ = level.Error(c.logger).Log("msg", "Error reading directory: "+path, "err", err) _ = level.Error(logger).Log("msg", "Error reading directory: "+path, "err", err)
errorMetric = 1.0 errorMetric = 1.0
return nil return nil
} }
if !dirEntry.IsDir() && strings.HasSuffix(dirEntry.Name(), ".prom") { if !dirEntry.IsDir() && strings.HasSuffix(dirEntry.Name(), ".prom") {
_ = level.Debug(c.logger).Log("msg", "Processing file: "+path) _ = level.Debug(logger).Log("msg", "Processing file: "+path)
families_array, err := scrapeFile(path, c.logger) families_array, err := scrapeFile(path, logger)
if err != nil { if err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Error scraping file: %q. Skip File.", path), "err", err) _ = level.Error(logger).Log("msg", fmt.Sprintf("Error scraping file: %q. Skip File.", path), "err", err)
errorMetric = 1.0 errorMetric = 1.0
return nil return nil
} }
fileInfo, err := os.Stat(path) fileInfo, err := os.Stat(path)
if err != nil { if err != nil {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Error reading file info: %q. Skip File.", path), "err", err) _ = level.Error(logger).Log("msg", fmt.Sprintf("Error reading file info: %q. Skip File.", path), "err", err)
errorMetric = 1.0 errorMetric = 1.0
return nil return nil
} }
if _, hasName := mTimes[fileInfo.Name()]; hasName { if _, hasName := mTimes[fileInfo.Name()]; hasName {
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Duplicate filename detected: %q. Skip File.", path)) _ = level.Error(logger).Log("msg", fmt.Sprintf("Duplicate filename detected: %q. Skip File.", path))
errorMetric = 1.0 errorMetric = 1.0
return nil return nil
} }
@@ -339,18 +334,18 @@ func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric)
return nil return nil
}) })
if err != nil && directory != "" { if err != nil && directory != "" {
_ = level.Error(c.logger).Log("msg", "Error reading textfile Collector directory: "+directory, "err", err) _ = level.Error(logger).Log("msg", "Error reading textfile Collector directory: "+directory, "err", err)
errorMetric = 1.0 errorMetric = 1.0
} }
} }
// If duplicates are detected across *multiple* files, return error. // If duplicates are detected across *multiple* files, return error.
if duplicateMetricEntry(metricFamilies) { if duplicateMetricEntry(metricFamilies) {
_ = level.Error(c.logger).Log("msg", "Duplicate metrics detected across multiple files") _ = level.Error(logger).Log("msg", "Duplicate metrics detected across multiple files")
errorMetric = 1.0 errorMetric = 1.0
} else { } else {
for _, mf := range metricFamilies { for _, mf := range metricFamilies {
c.convertMetricFamily(mf, ch) c.convertMetricFamily(logger, mf, ch)
} }
} }
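For orientation, the walk performed by the textfile collector is unchanged by this refactor: every configured directory is scanned for *.prom files, and a read failure flips an error gauge instead of failing the whole scrape. A stripped-down sketch of that behaviour, with the directory and output invented for the example:

```
package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	errorFlag := 0.0
	dir := os.TempDir() // illustrative; the exporter takes its directories from configuration

	err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			// Record the failure and keep walking rather than aborting the scrape.
			fmt.Fprintln(os.Stderr, "error reading", path, ":", err)
			errorFlag = 1.0
			return nil
		}
		if !d.IsDir() && strings.HasSuffix(d.Name(), ".prom") {
			fmt.Println("would scrape", path)
		}
		return nil
	})
	if err != nil {
		errorFlag = 1.0
	}

	fmt.Println("scrape error flag:", errorFlag)
}
```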
@@ -98,7 +98,7 @@ func TestDuplicateMetricEntry(t *testing.T) {
Metric: []*dto.Metric{&metric1, &metric2}, Metric: []*dto.Metric{&metric1, &metric2},
} }
duplicateFamily := []*dto.MetricFamily{} var duplicateFamily []*dto.MetricFamily
duplicateFamily = append(duplicateFamily, &duplicate) duplicateFamily = append(duplicateFamily, &duplicate)
// Ensure detection for duplicate metrics // Ensure detection for duplicate metrics
@@ -19,15 +19,16 @@ var baseDir = "../../../tools/textfile-test"
func TestMultipleDirectories(t *testing.T) { func TestMultipleDirectories(t *testing.T) {
t.Parallel() t.Parallel()
logger := log.NewLogfmtLogger(os.Stdout)
testDir := baseDir + "/multiple-dirs" testDir := baseDir + "/multiple-dirs"
testDirs := fmt.Sprintf("%[1]s/dir1,%[1]s/dir2,%[1]s/dir3", testDir) testDirs := fmt.Sprintf("%[1]s/dir1,%[1]s/dir2,%[1]s/dir3", testDir)
textFileCollector := textfile.New(log.NewLogfmtLogger(os.Stdout), &textfile.Config{ textFileCollector := textfile.New(&textfile.Config{
TextFileDirectories: strings.Split(testDirs, ","), TextFileDirectories: strings.Split(testDirs, ","),
}) })
collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector}) collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
require.NoError(t, collectors.Build()) require.NoError(t, collectors.Build(logger))
scrapeContext, err := collectors.PrepareScrapeContext() scrapeContext, err := collectors.PrepareScrapeContext()
if err != nil { if err != nil {
@@ -47,7 +48,7 @@ func TestMultipleDirectories(t *testing.T) {
} }
}() }()
err = textFileCollector.Collect(scrapeContext, metrics) err = textFileCollector.Collect(scrapeContext, logger, metrics)
if err != nil { if err != nil {
t.Errorf("Unexpected error %s", err) t.Errorf("Unexpected error %s", err)
} }
@@ -62,13 +63,14 @@ func TestMultipleDirectories(t *testing.T) {
func TestDuplicateFileName(t *testing.T) { func TestDuplicateFileName(t *testing.T) {
t.Parallel() t.Parallel()
logger := log.NewLogfmtLogger(os.Stdout)
testDir := baseDir + "/duplicate-filename" testDir := baseDir + "/duplicate-filename"
textFileCollector := textfile.New(log.NewLogfmtLogger(os.Stdout), &textfile.Config{ textFileCollector := textfile.New(&textfile.Config{
TextFileDirectories: []string{testDir}, TextFileDirectories: []string{testDir},
}) })
collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector}) collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
require.NoError(t, collectors.Build()) require.NoError(t, collectors.Build(logger))
scrapeContext, err := collectors.PrepareScrapeContext() scrapeContext, err := collectors.PrepareScrapeContext()
if err != nil { if err != nil {
@@ -87,7 +89,7 @@ func TestDuplicateFileName(t *testing.T) {
got += metric.String() got += metric.String()
} }
}() }()
err = textFileCollector.Collect(scrapeContext, metrics) err = textFileCollector.Collect(scrapeContext, logger, metrics)
if err != nil { if err != nil {
t.Errorf("Unexpected error %s", err) t.Errorf("Unexpected error %s", err)
} }
@@ -9,8 +9,8 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "thermalzone" const Name = "thermalzone"
@@ -21,15 +21,15 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
percentPassiveLimit *prometheus.Desc percentPassiveLimit *prometheus.Desc
temperature *prometheus.Desc temperature *prometheus.Desc
throttleReasons *prometheus.Desc throttleReasons *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -38,8 +38,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -51,11 +49,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -63,7 +57,12 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.temperature = prometheus.NewDesc( c.temperature = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "temperature_celsius"), prometheus.BuildFQName(types.Namespace, Name, "temperature_celsius"),
"(Temperature)", "(Temperature)",
@@ -93,9 +92,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collect(ch); err != nil { if err := c.collect(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting thermalzone metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting thermalzone metrics", "err", err)
return err return err
} }
return nil return nil
@@ -113,14 +113,13 @@ type Win32_PerfRawData_Counters_ThermalZoneInformation struct {
func (c *Collector) collect(ch chan<- prometheus.Metric) error { func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_Counters_ThermalZoneInformation var dst []Win32_PerfRawData_Counters_ThermalZoneInformation
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_Counters_ThermalZoneInformation", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
// ThermalZone collector has been known to 'successfully' return an empty result. // ThermalZone collector has been known to 'successfully' return an empty result.
if len(dst) == 0 { if len(dst) == 0 {
return errors.New("Empty results set for collector") return errors.New("empty results set for collector")
} }
for _, info := range dst { for _, info := range dst {
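The thermalzone hunk is representative of the pattern applied throughout this compare view: instead of deriving a query string from the Go struct name via the removed pkg/wmi helpers, each collector now receives a shared *wmi.Client and issues an explicit WQL statement. A rough, Windows-only sketch of that usage with github.com/yusufpapurcu/wmi (fields trimmed, error handling simplified, raw counter units left uninterpreted):

```
//go:build windows

package main

import (
	"errors"
	"fmt"

	"github.com/yusufpapurcu/wmi"
)

// Only the fields we care about; other WMI properties are ignored.
type thermalZone struct {
	Name        string
	Temperature uint32
}

func main() {
	client := &wmi.Client{AllowMissingFields: true}

	// One SWbemServices connection reused for every query avoids the WMF 5+ leak
	// that the removed pkg/wmi wrapper used to guard against.
	services, err := wmi.InitializeSWbemServices(client)
	if err != nil {
		panic(err)
	}
	client.SWbemServicesClient = services

	var dst []thermalZone
	if err := client.Query("SELECT * FROM Win32_PerfRawData_Counters_ThermalZoneInformation", &dst); err != nil {
		panic(err)
	}
	if len(dst) == 0 {
		panic(errors.New("empty results set for collector"))
	}
	for _, zone := range dst {
		fmt.Printf("%s: raw temperature counter %d\n", zone.Name, zone.Temperature)
	}
}
```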
@@ -12,6 +12,7 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/winversion" "github.com/prometheus-community/windows_exporter/pkg/winversion"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "time" const Name = "time"
@@ -23,7 +24,6 @@ var ConfigDefaults = Config{}
// Collector is a Prometheus Collector for Perflib counter metrics. // Collector is a Prometheus Collector for Perflib counter metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger
clockFrequencyAdjustmentPPBTotal *prometheus.Desc clockFrequencyAdjustmentPPBTotal *prometheus.Desc
computedTimeOffset *prometheus.Desc computedTimeOffset *prometheus.Desc
@@ -33,7 +33,7 @@ type Collector struct {
ntpServerOutgoingResponsesTotal *prometheus.Desc ntpServerOutgoingResponsesTotal *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -42,8 +42,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -55,11 +53,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{"Windows Time Service"}, nil return []string{"Windows Time Service"}, nil
} }
@@ -67,9 +61,9 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, _ *wmi.Client) error {
if winversion.WindowsVersionFloat <= 6.1 { if winversion.WindowsVersionFloat() <= 6.1 {
return errors.New("Windows version older than Server 2016 detected. The time collector will not run and should be disabled via CLI flags or configuration file") return errors.New("windows version older than Server 2016 detected. The time collector will not run and should be disabled via CLI flags or configuration file")
} }
c.clockFrequencyAdjustmentPPBTotal = prometheus.NewDesc( c.clockFrequencyAdjustmentPPBTotal = prometheus.NewDesc(
@@ -113,9 +107,10 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ctx, ch); err != nil { logger = log.With(logger, "collector", Name)
_ = level.Error(c.logger).Log("msg", "failed collecting time metrics", "err", err) if err := c.collect(ctx, logger, ch); err != nil {
_ = level.Error(logger).Log("msg", "failed collecting time metrics", "err", err)
return err return err
} }
return nil return nil
@@ -131,9 +126,10 @@ type windowsTime struct {
NTPServerOutgoingResponsesTotal float64 `perflib:"NTP Server Outgoing Responses"` NTPServerOutgoingResponsesTotal float64 `perflib:"NTP Server Outgoing Responses"`
} }
func (c *Collector) collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
var dst []windowsTime // Single-instance class, array is required but will have single entry. var dst []windowsTime // Single-instance class, array is required but will have single entry.
if err := perflib.UnmarshalObject(ctx.PerfObjects["Windows Time Service"], &dst, c.logger); err != nil { if err := perflib.UnmarshalObject(ctx.PerfObjects["Windows Time Service"], &dst, logger); err != nil {
return err return err
} }
@@ -5,32 +5,29 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
type Collectors struct { type Collectors struct {
logger log.Logger
collectors Map collectors Map
wmiClient *wmi.Client
perfCounterQuery string perfCounterQuery string
} }
type Map map[string]Collector
type ( type (
Builder func(logger log.Logger) Collector
BuilderWithFlags[C Collector] func(*kingpin.Application) C BuilderWithFlags[C Collector] func(*kingpin.Application) C
Map map[string]Collector
) )
// Collector interface that a collector has to implement. // Collector interface that a collector has to implement.
type Collector interface { type Collector interface {
Build() error Build(logger log.Logger, wmiClient *wmi.Client) error
// Close closes the collector // Close closes the collector
Close() error Close() error
// GetName get the name of the collector // GetName get the name of the collector
GetName() string GetName() string
// GetPerfCounter returns the perf counter required by the collector // GetPerfCounter returns the perf counter required by the collector
GetPerfCounter() ([]string, error) GetPerfCounter(logger log.Logger) ([]string, error)
// Collect Get new metrics and expose them via prometheus registry. // Collect Get new metrics and expose them via prometheus registry.
Collect(ctx *types.ScrapeContext, ch chan<- prometheus.Metric) (err error) Collect(ctx *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) (err error)
SetLogger(logger log.Logger)
} }
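Read together, the new interface means a collector keeps no logger field and opens no WMI connection of its own; both are handed in by the framework. A hypothetical bare-bones collector satisfying this shape could look like the following — the name and metric are invented, and it only compiles against the exporter's own packages on Windows:

```
package noop

import (
	"errors"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
)

const Name = "noop"

type Collector struct {
	wmiClient *wmi.Client
	up        *prometheus.Desc
}

func (c *Collector) GetName() string { return Name }

func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) { return []string{}, nil }

func (c *Collector) Close() error { return nil }

// Build receives the shared logger and WMI client instead of storing a logger at construction time.
func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
	if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
		return errors.New("wmiClient or SWbemServicesClient is nil")
	}
	c.wmiClient = wmiClient
	c.up = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "up"),
		"Always 1; placeholder metric.",
		nil, nil,
	)
	return nil
}

// Collect gets a per-scrape logger so log lines can carry request-scoped context.
func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
	logger = log.With(logger, "collector", Name)
	_ = level.Debug(logger).Log("msg", "collecting noop metric")
	ch <- prometheus.MustNewConstMetric(c.up, prometheus.GaugeValue, 1)
	return nil
}
```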
@@ -10,8 +10,8 @@ import (
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/perflib"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "vmware" const Name = "vmware"
@@ -22,8 +22,8 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics. // A Collector is a Prometheus Collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics.
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
memActive *prometheus.Desc memActive *prometheus.Desc
memBallooned *prometheus.Desc memBallooned *prometheus.Desc
@@ -47,7 +47,7 @@ type Collector struct {
hostProcessorSpeedMHz *prometheus.Desc hostProcessorSpeedMHz *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -56,8 +56,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -69,11 +67,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -81,7 +75,13 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(_ log.Logger, wmiClient *wmi.Client) error {
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.memActive = prometheus.NewDesc( c.memActive = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mem_active_bytes"), prometheus.BuildFQName(types.Namespace, Name, "mem_active_bytes"),
"(MemActiveMB)", "(MemActiveMB)",
@@ -202,13 +202,14 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collectMem(ch); err != nil { if err := c.collectMem(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware memory metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware memory metrics", "err", err)
return err return err
} }
if err := c.collectCpu(ch); err != nil { if err := c.collectCpu(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware cpu metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware cpu metrics", "err", err)
return err return err
} }
return nil return nil
@@ -241,8 +242,7 @@ type Win32_PerfRawData_vmGuestLib_VCPU struct {
func (c *Collector) collectMem(ch chan<- prometheus.Metric) error { func (c *Collector) collectMem(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_vmGuestLib_VMem var dst []Win32_PerfRawData_vmGuestLib_VMem
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_vmGuestLib_VMem", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {
@@ -330,8 +330,7 @@ func mbToBytes(mb uint64) float64 {
func (c *Collector) collectCpu(ch chan<- prometheus.Metric) error { func (c *Collector) collectCpu(ch chan<- prometheus.Metric) error {
var dst []Win32_PerfRawData_vmGuestLib_VCPU var dst []Win32_PerfRawData_vmGuestLib_VCPU
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_vmGuestLib_VCPU", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
if len(dst) == 0 { if len(dst) == 0 {
@@ -3,12 +3,14 @@
package vmware_blast package vmware_blast
import ( import (
"errors"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
"github.com/prometheus-community/windows_exporter/pkg/wmi"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/yusufpapurcu/wmi"
) )
const Name = "vmware_blast" const Name = "vmware_blast"
@@ -32,8 +34,8 @@ var ConfigDefaults = Config{}
// win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters // win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters
type Collector struct { type Collector struct {
config Config config Config
logger log.Logger wmiClient *wmi.Client
audioReceivedBytes *prometheus.Desc audioReceivedBytes *prometheus.Desc
audioReceivedPackets *prometheus.Desc audioReceivedPackets *prometheus.Desc
@@ -117,7 +119,7 @@ type Collector struct {
windowsMediaMMRTransmittedPackets *prometheus.Desc windowsMediaMMRTransmittedPackets *prometheus.Desc
} }
func New(logger log.Logger, config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
} }
@@ -126,8 +128,6 @@ func New(logger log.Logger, config *Config) *Collector {
config: *config, config: *config,
} }
c.SetLogger(logger)
return c return c
} }
@@ -139,11 +139,7 @@ func (c *Collector) GetName() string {
return Name return Name
} }
func (c *Collector) SetLogger(logger log.Logger) { func (c *Collector) GetPerfCounter(_ log.Logger) ([]string, error) {
c.logger = log.With(logger, "collector", Name)
}
func (c *Collector) GetPerfCounter() ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@@ -151,8 +147,15 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build() error { func (c *Collector) Build(logger log.Logger, wmiClient *wmi.Client) error {
_ = level.Warn(c.logger).Log("msg", "vmware_blast collector is deprecated and will be removed in the future.") _ = level.Warn(logger).
Log("msg", "vmware_blast collector is deprecated and will be removed in the future.", "collector", Name)
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
return errors.New("wmiClient or SWbemServicesClient is nil")
}
c.wmiClient = wmiClient
c.audioReceivedBytes = prometheus.NewDesc( c.audioReceivedBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "audio_received_bytes_total"), prometheus.BuildFQName(types.Namespace, Name, "audio_received_bytes_total"),
@@ -584,53 +587,54 @@ func (c *Collector) Build() error {
// Collect sends the metric values for each metric // Collect sends the metric values for each metric
// to the provided prometheus Metric channel. // to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, ch chan<- prometheus.Metric) error { func (c *Collector) Collect(_ *types.ScrapeContext, logger log.Logger, ch chan<- prometheus.Metric) error {
logger = log.With(logger, "collector", Name)
if err := c.collectAudio(ch); err != nil { if err := c.collectAudio(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast audio metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast audio metrics", "err", err)
return err return err
} }
if err := c.collectCdr(ch); err != nil { if err := c.collectCdr(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast CDR metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast CDR metrics", "err", err)
return err return err
} }
if err := c.collectClipboard(ch); err != nil { if err := c.collectClipboard(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast clipboard metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast clipboard metrics", "err", err)
return err return err
} }
if err := c.collectHtml5Mmr(ch); err != nil { if err := c.collectHtml5Mmr(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast HTML5 MMR metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast HTML5 MMR metrics", "err", err)
return err return err
} }
if err := c.collectImaging(ch); err != nil { if err := c.collectImaging(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast imaging metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast imaging metrics", "err", err)
return err return err
} }
if err := c.collectRtav(ch); err != nil { if err := c.collectRtav(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast RTAV metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast RTAV metrics", "err", err)
return err return err
} }
if err := c.collectSerialPortandScanner(ch); err != nil { if err := c.collectSerialPortandScanner(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast serial port and scanner metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast serial port and scanner metrics", "err", err)
return err return err
} }
if err := c.collectSession(ch); err != nil { if err := c.collectSession(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast metrics", "err", err)
return err return err
} }
if err := c.collectSkypeforBusinessControl(ch); err != nil { if err := c.collectSkypeforBusinessControl(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast skype for business control metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast skype for business control metrics", "err", err)
return err return err
} }
if err := c.collectThinPrint(ch); err != nil { if err := c.collectThinPrint(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast thin print metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast thin print metrics", "err", err)
return err return err
} }
if err := c.collectUsb(ch); err != nil { if err := c.collectUsb(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast USB metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast USB metrics", "err", err)
return err return err
} }
if err := c.collectWindowsMediaMmr(ch); err != nil { if err := c.collectWindowsMediaMmr(ch); err != nil {
_ = level.Error(c.logger).Log("msg", "failed collecting vmware blast windows media MMR metrics", "err", err) _ = level.Error(logger).Log("msg", "failed collecting vmware blast windows media MMR metrics", "err", err)
return err return err
} }
return nil return nil
@@ -743,8 +747,7 @@ type win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters struct {
func (c *Collector) collectAudio(ch chan<- prometheus.Metric) error { func (c *Collector) collectAudio(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastAudioCounters var dst []win32_PerfRawData_Counters_VMwareBlastAudioCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastAudioCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -782,8 +785,7 @@ func (c *Collector) collectAudio(ch chan<- prometheus.Metric) error {
func (c *Collector) collectCdr(ch chan<- prometheus.Metric) error { func (c *Collector) collectCdr(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastCDRCounters var dst []win32_PerfRawData_Counters_VMwareBlastCDRCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastCDRCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -821,8 +823,7 @@ func (c *Collector) collectCdr(ch chan<- prometheus.Metric) error {
func (c *Collector) collectClipboard(ch chan<- prometheus.Metric) error { func (c *Collector) collectClipboard(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastClipboardCounters var dst []win32_PerfRawData_Counters_VMwareBlastClipboardCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastClipboardCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -860,8 +861,7 @@ func (c *Collector) collectClipboard(ch chan<- prometheus.Metric) error {
func (c *Collector) collectHtml5Mmr(ch chan<- prometheus.Metric) error { func (c *Collector) collectHtml5Mmr(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastHTML5MMRcounters var dst []win32_PerfRawData_Counters_VMwareBlastHTML5MMRcounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastHTML5MMRcounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -899,8 +899,7 @@ func (c *Collector) collectHtml5Mmr(ch chan<- prometheus.Metric) error {
func (c *Collector) collectImaging(ch chan<- prometheus.Metric) error { func (c *Collector) collectImaging(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastImagingCounters var dst []win32_PerfRawData_Counters_VMwareBlastImagingCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastImagingCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -986,8 +985,7 @@ func (c *Collector) collectImaging(ch chan<- prometheus.Metric) error {
func (c *Collector) collectRtav(ch chan<- prometheus.Metric) error { func (c *Collector) collectRtav(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastRTAVCounters var dst []win32_PerfRawData_Counters_VMwareBlastRTAVCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastRTAVCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1025,8 +1023,7 @@ func (c *Collector) collectRtav(ch chan<- prometheus.Metric) error {
func (c *Collector) collectSerialPortandScanner(ch chan<- prometheus.Metric) error { func (c *Collector) collectSerialPortandScanner(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastSerialPortandScannerCounters var dst []win32_PerfRawData_Counters_VMwareBlastSerialPortandScannerCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastSerialPortandScannerCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1064,8 +1061,7 @@ func (c *Collector) collectSerialPortandScanner(ch chan<- prometheus.Metric) err
func (c *Collector) collectSession(ch chan<- prometheus.Metric) error { func (c *Collector) collectSession(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastSessionCounters var dst []win32_PerfRawData_Counters_VMwareBlastSessionCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastSessionCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1181,8 +1177,7 @@ func (c *Collector) collectSession(ch chan<- prometheus.Metric) error {
func (c *Collector) collectSkypeforBusinessControl(ch chan<- prometheus.Metric) error { func (c *Collector) collectSkypeforBusinessControl(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastSkypeforBusinessControlCounters var dst []win32_PerfRawData_Counters_VMwareBlastSkypeforBusinessControlCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastSkypeforBusinessControlCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1220,8 +1215,7 @@ func (c *Collector) collectSkypeforBusinessControl(ch chan<- prometheus.Metric)
func (c *Collector) collectThinPrint(ch chan<- prometheus.Metric) error { func (c *Collector) collectThinPrint(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastThinPrintCounters var dst []win32_PerfRawData_Counters_VMwareBlastThinPrintCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastThinPrintCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1259,8 +1253,7 @@ func (c *Collector) collectThinPrint(ch chan<- prometheus.Metric) error {
func (c *Collector) collectUsb(ch chan<- prometheus.Metric) error { func (c *Collector) collectUsb(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastUSBCounters var dst []win32_PerfRawData_Counters_VMwareBlastUSBCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastUSBCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -1298,8 +1291,7 @@ func (c *Collector) collectUsb(ch chan<- prometheus.Metric) error {
func (c *Collector) collectWindowsMediaMmr(ch chan<- prometheus.Metric) error { func (c *Collector) collectWindowsMediaMmr(ch chan<- prometheus.Metric) error {
var dst []win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters var dst []win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters
q := wmi.QueryAll(&dst, c.logger) if err := c.wmiClient.Query("SELECT * FROM win32_PerfRawData_Counters_VMwareBlastWindowsMediaMMRCounters", &dst); err != nil {
if err := wmi.Query(q, &dst); err != nil {
return err return err
} }
@@ -43,7 +43,7 @@ type DynamicTimezoneInformation struct {
func GetDynamicTimeZoneInformation() (DynamicTimezoneInformation, error) { func GetDynamicTimeZoneInformation() (DynamicTimezoneInformation, error) {
var tzi DynamicTimezoneInformation var tzi DynamicTimezoneInformation
r0, _, err := syscall.SyscallN(procGetDynamicTimeZoneInformationSys.Addr(), uintptr(unsafe.Pointer(&tzi))) r0, _, err := procGetDynamicTimeZoneInformationSys.Call(uintptr(unsafe.Pointer(&tzi)))
if uint32(r0) == 0xffffffff { if uint32(r0) == 0xffffffff {
return tzi, err return tzi, err
} }
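The switch from syscall.SyscallN to the proc's Call method above mostly changes who packs the arguments; Call always returns a non-nil error by its documentation, so the API's own return value still has to be checked first, just as the existing 0xffffffff test does. A Windows-only toy illustration of that convention using an unrelated API (GetTickCount64, chosen only because it takes no arguments):

```
//go:build windows

package main

import (
	"fmt"
	"syscall"
)

var (
	kernel32           = syscall.NewLazyDLL("kernel32.dll")
	procGetTickCount64 = kernel32.NewProc("GetTickCount64")
)

func main() {
	// Call packs the uintptr arguments; r1 carries the API's return value.
	r1, _, callErr := procGetTickCount64.Call()
	if r1 == 0 {
		// Only consult callErr after checking the documented failure value.
		fmt.Println("call failed:", callErr)
		return
	}
	fmt.Printf("uptime: %d ms\n", uint64(r1))
}
```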
@@ -45,16 +45,16 @@ var NetApiStatus = map[uint32]string{
2351: "NERR_InvalidComputer", 2351: "NERR_InvalidComputer",
// This operation is only allowed on the primary domain controller of the domain. // This operation is only allowed on the primary domain controller of the domain.
2226: "NERR_NotPrimary", 2226: "NERR_NotPrimary",
/// This operation is not allowed on this special group. // This operation is not allowed on this special group.
2234: "NERR_SpeGroupOp", 2234: "NERR_SpeGroupOp",
/// This operation is not allowed on the last administrative account. // This operation is not allowed on the last administrative account.
2452: "NERR_LastAdmin", 2452: "NERR_LastAdmin",
/// The password parameter is invalid. // The password parameter is invalid.
2203: "NERR_BadPassword", 2203: "NERR_BadPassword",
/// The password does not meet the password policy requirements. // The password does not meet the password policy requirements.
/// Check the minimum password length, password complexity and password history requirements. // Check the minimum password length, password complexity and password history requirements.
2245: "NERR_PasswordTooShort", 2245: "NERR_PasswordTooShort",
/// The user name could not be found. // The user name could not be found.
2221: "NERR_UserNotFound", 2221: "NERR_UserNotFound",
// Errors // Errors
5: "ERROR_ACCESS_DENIED", 5: "ERROR_ACCESS_DENIED",
@@ -12,7 +12,8 @@ var (
procSLIsWindowsGenuineLocal = slc.NewProc("SLIsWindowsGenuineLocal") procSLIsWindowsGenuineLocal = slc.NewProc("SLIsWindowsGenuineLocal")
) )
// Define SL_GENUINE_STATE enumeration // SL_GENUINE_STATE enumeration
//
// https://learn.microsoft.com/en-us/windows/win32/api/slpublic/ne-slpublic-sl_genuine_state // https://learn.microsoft.com/en-us/windows/win32/api/slpublic/ne-slpublic-sl_genuine_state
type SL_GENUINE_STATE uint32 type SL_GENUINE_STATE uint32
@@ -15,10 +15,11 @@ import (
func FuncBenchmarkCollector[C collector.Collector](b *testing.B, name string, collectFunc collector.BuilderWithFlags[C]) { func FuncBenchmarkCollector[C collector.Collector](b *testing.B, name string, collectFunc collector.BuilderWithFlags[C]) {
b.Helper() b.Helper()
logger := log.NewNopLogger()
c := collectFunc(kingpin.CommandLine) c := collectFunc(kingpin.CommandLine)
collectors := collector.New(map[string]collector.Collector{name: c}) collectors := collector.New(map[string]collector.Collector{name: c})
require.NoError(b, collectors.Build()) require.NoError(b, collectors.Build(logger))
collectors.SetLogger(log.NewNopLogger())
// Create perflib scrape context. // Create perflib scrape context.
// Some perflib collectors required a correct context, // Some perflib collectors required a correct context,
@@ -34,6 +35,6 @@ func FuncBenchmarkCollector[C collector.Collector](b *testing.B, name string, co
}() }()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
require.NoError(b, c.Collect(scrapeContext, metrics)) require.NoError(b, c.Collect(scrapeContext, logger, metrics))
} }
} }
@@ -3,24 +3,11 @@
package utils package utils
import ( import (
"slices"
"sort"
"strings" "strings"
"github.com/prometheus-community/windows_exporter/pkg/types" "github.com/prometheus-community/windows_exporter/pkg/types"
) )
// ExpandEnabledChildCollectors used by more complex Collectors where user input specifies enabled child Collectors.
// Splits provided child Collectors and deduplicate.
func ExpandEnabledChildCollectors(enabled string) []string {
result := slices.Compact(strings.Split(enabled, ","))
// Result must order, to prevent test failures.
sort.Strings(result)
return result
}
func ExpandEnabledCollectors(enabled string) []string { func ExpandEnabledCollectors(enabled string) []string {
expanded := strings.ReplaceAll(enabled, types.DefaultCollectorsPlaceholder, types.DefaultCollectors) expanded := strings.ReplaceAll(enabled, types.DefaultCollectorsPlaceholder, types.DefaultCollectors)
separated := strings.Split(expanded, ",") separated := strings.Split(expanded, ",")
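Worth noting on the removed ExpandEnabledChildCollectors helper: slices.Compact only drops consecutive duplicates, so the removed version, which compacted before sorting, would have missed non-adjacent repeats. A small standalone sketch of the split–sort–compact idiom (this is not the exporter's replacement code, which is not part of this compare view):

```
package main

import (
	"fmt"
	"slices"
	"strings"
)

// expandEnabled splits a comma-separated flag value and deduplicates it.
// Sorting first matters: slices.Compact only removes *consecutive* duplicates.
func expandEnabled(enabled string) []string {
	parts := strings.Split(enabled, ",")
	slices.Sort(parts)
	return slices.Compact(parts)
}

func main() {
	fmt.Println(expandEnabled("testing1,testing2,testing2,testing3"))
	// Output: [testing1 testing2 testing3]
}
```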
@@ -1,7 +1,6 @@
package utils_test package utils_test
import ( import (
"reflect"
"sort" "sort"
"strings" "strings"
"testing" "testing"
@@ -10,38 +9,6 @@ import (
"github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus-community/windows_exporter/pkg/utils"
) )
func TestExpandChildCollectors(t *testing.T) {
t.Parallel()
cases := []struct {
name string
input string
expectedOutput []string
}{
{
name: "simple",
input: "testing1,testing2,testing3",
expectedOutput: []string{"testing1", "testing2", "testing3"},
},
{
name: "duplicate",
input: "testing1,testing2,testing2,testing3",
expectedOutput: []string{"testing1", "testing2", "testing3"},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
t.Parallel()
output := utils.ExpandEnabledChildCollectors(c.input)
if !reflect.DeepEqual(output, c.expectedOutput) {
t.Errorf("Output mismatch, expected %+v, got %+v", c.expectedOutput, output)
}
})
}
}
func TestExpandEnabled(t *testing.T) { func TestExpandEnabled(t *testing.T) {
t.Parallel() t.Parallel()
@@ -13,10 +13,6 @@ func BoolToFloat(b bool) float64 {
return 0.0 return 0.0
} }
func HasValue(v *string) bool {
return !IsEmpty(v)
}
func IsEmpty(v *string) bool { func IsEmpty(v *string) bool {
return v == nil || *v == "" return v == nil || *v == ""
} }
@@ -5,43 +5,39 @@ package winversion
import ( import (
"fmt" "fmt"
"strconv" "strconv"
"sync"
"golang.org/x/sys/windows/registry" "golang.org/x/sys/windows/registry"
) )
var ( var WindowsVersionFloat = sync.OnceValue[float64](func() float64 {
WindowsVersion string version, err := getWindowsVersion()
WindowsVersionFloat float64
)
//nolint:gochecknoinits
func init() {
var err error
WindowsVersion, WindowsVersionFloat, err = GetWindowsVersion()
if err != nil { if err != nil {
panic(err) panic(err)
} }
}
return version
})
// GetWindowsVersion reads the version number of the OS from the Registry // GetWindowsVersion reads the version number of the OS from the Registry
// See https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version // See https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
func GetWindowsVersion() (string, float64, error) { func getWindowsVersion() (float64, error) {
reg, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) reg, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
if err != nil { if err != nil {
return "", 0, fmt.Errorf("couldn't open registry: %w", err) return 0, fmt.Errorf("couldn't open registry: %w", err)
} }
defer reg.Close() defer reg.Close()
windowsVersion, _, err := reg.GetStringValue("CurrentVersion") windowsVersion, _, err := reg.GetStringValue("CurrentVersion")
if err != nil { if err != nil {
return "", 0, fmt.Errorf("couldn't open registry: %w", err) return 0, fmt.Errorf("couldn't open registry: %w", err)
} }
windowsVersionFloat, err := strconv.ParseFloat(windowsVersion, 64) windowsVersionFloat, err := strconv.ParseFloat(windowsVersion, 64)
if err != nil { if err != nil {
return "", 0, fmt.Errorf("couldn't open registry: %w", err) return 0, fmt.Errorf("couldn't open registry: %w", err)
} }
return windowsVersion, windowsVersionFloat, nil return windowsVersionFloat, nil
} }
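The winversion rewrite trades an init() that panicked at import time for sync.OnceValue, so the registry read happens lazily, at most once, on first use. A generic sketch of the same caching pattern with the expensive lookup faked:

```
package main

import (
	"fmt"
	"sync"
)

// windowsVersion is computed at most once, on first use, and the result is cached.
// A failure would still panic, but only when (and if) something calls the function.
var windowsVersion = sync.OnceValue(func() float64 {
	fmt.Println("expensive lookup runs once")
	return 10.0 // stand-in for reading CurrentVersion from the registry
})

func main() {
	fmt.Println(windowsVersion())
	fmt.Println(windowsVersion()) // cached; the lookup does not run again
}
```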
@@ -1,92 +0,0 @@
package wmi
import (
"bytes"
"reflect"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/yusufpapurcu/wmi"
)
func InitWbem(logger log.Logger) error {
// This initialization prevents a memory leak on WMF 5+. See
// https://github.com/prometheus-community/windows_exporter/issues/77 and
// linked issues for details.
_ = level.Debug(logger).Log("msg", "Initializing SWbemServices")
s, err := wmi.InitializeSWbemServices(wmi.DefaultClient)
if err != nil {
return err
}
wmi.DefaultClient.AllowMissingFields = true
wmi.DefaultClient.SWbemServicesClient = s
return nil
}
func className(src interface{}) string {
s := reflect.Indirect(reflect.ValueOf(src))
t := s.Type()
if s.Kind() == reflect.Slice {
t = t.Elem()
}
return t.Name()
}
func Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
return wmi.Query(query, dst, connectServerArgs...)
}
func QueryNamespace(query string, dst interface{}, namespace string) error {
return wmi.QueryNamespace(query, dst, namespace)
}
// QueryAll returns a query string that selects all fields from the given
// struct type.
// Deprecated: Use QueryAllForClass instead.
func QueryAll(src interface{}, logger log.Logger) string {
var b bytes.Buffer
b.WriteString("SELECT * FROM ")
b.WriteString(className(src))
_ = level.Debug(logger).Log("msg", "Generated WMI query "+b.String())
return b.String()
}
func QueryAllForClass(_ interface{}, class string, logger log.Logger) string {
var b bytes.Buffer
b.WriteString("SELECT * FROM ")
b.WriteString(class)
_ = level.Debug(logger).Log("msg", "Generated WMI query "+b.String())
return b.String()
}
// Deprecated: Use QueryAllForClassWhere instead.
func QueryAllWhere(src interface{}, where string, logger log.Logger) string {
var b bytes.Buffer
b.WriteString("SELECT * FROM ")
b.WriteString(className(src))
if where != "" {
b.WriteString(" WHERE ")
b.WriteString(where)
}
_ = level.Debug(logger).Log("msg", "Generated WMI query "+b.String())
return b.String()
}
func QueryAllForClassWhere(_ interface{}, class string, where string, logger log.Logger) string {
var b bytes.Buffer
b.WriteString("SELECT * FROM ")
b.WriteString(class)
if where != "" {
b.WriteString(" WHERE ")
b.WriteString(where)
}
_ = level.Debug(logger).Log("msg", "Generated WMI query "+b.String())
return b.String()
}
@@ -1,121 +0,0 @@
package wmi
import (
"testing"
"github.com/go-kit/log"
)
type fakeWmiClass struct {
Name string
SomeProperty int
}
var (
mapQueryAll = func(src interface{}, _ string, _ string) string {
return QueryAll(src, log.NewNopLogger())
}
mapQueryAllWhere = func(src interface{}, _ string, where string) string {
return QueryAllWhere(src, where, log.NewNopLogger())
}
mapQueryAllForClass = func(src interface{}, class string, _ string) string {
return QueryAllForClass(src, class, log.NewNopLogger())
}
mapQueryAllForClassWhere = func(src interface{}, class string, where string) string {
return QueryAllForClassWhere(src, class, where, log.NewNopLogger())
}
)
type queryFunc func(src interface{}, class string, where string) string
func TestCreateQuery(t *testing.T) {
t.Parallel()
cases := []struct {
desc string
dst interface{}
class string
where string
queryFunc queryFunc
expected string
}{
{
desc: "queryAll on single instance",
dst: fakeWmiClass{},
queryFunc: mapQueryAll,
expected: "SELECT * FROM fakeWmiClass",
},
{
desc: "queryAll on slice",
dst: []fakeWmiClass{},
queryFunc: mapQueryAll,
expected: "SELECT * FROM fakeWmiClass",
},
{
desc: "queryAllWhere on single instance",
dst: fakeWmiClass{},
where: "foo = bar",
queryFunc: mapQueryAllWhere,
expected: "SELECT * FROM fakeWmiClass WHERE foo = bar",
},
{
desc: "queryAllWhere on slice",
dst: []fakeWmiClass{},
where: "foo = bar",
queryFunc: mapQueryAllWhere,
expected: "SELECT * FROM fakeWmiClass WHERE foo = bar",
},
{
desc: "queryAllWhere on single instance with empty where",
dst: fakeWmiClass{},
queryFunc: mapQueryAllWhere,
expected: "SELECT * FROM fakeWmiClass",
},
{
desc: "queryAllForClass on single instance",
dst: fakeWmiClass{},
class: "someClass",
queryFunc: mapQueryAllForClass,
expected: "SELECT * FROM someClass",
},
{
desc: "queryAllForClass on slice",
dst: []fakeWmiClass{},
class: "someClass",
queryFunc: mapQueryAllForClass,
expected: "SELECT * FROM someClass",
},
{
desc: "queryAllForClassWhere on single instance",
dst: fakeWmiClass{},
class: "someClass",
where: "foo = bar",
queryFunc: mapQueryAllForClassWhere,
expected: "SELECT * FROM someClass WHERE foo = bar",
},
{
desc: "queryAllForClassWhere on slice",
dst: []fakeWmiClass{},
class: "someClass",
where: "foo = bar",
queryFunc: mapQueryAllForClassWhere,
expected: "SELECT * FROM someClass WHERE foo = bar",
},
{
desc: "queryAllForClassWhere on single instance with empty where",
dst: fakeWmiClass{},
class: "someClass",
queryFunc: mapQueryAllForClassWhere,
expected: "SELECT * FROM someClass",
},
}
for _, c := range cases {
t.Run(c.desc, func(t *testing.T) {
t.Parallel()
if q := c.queryFunc(c.dst, c.class, c.where); q != c.expected {
t.Errorf("Case %q failed: Expected %q, got %q", c.desc, c.expected, q)
}
})
}
}

View File

@@ -11,6 +11,20 @@ test_alpha_total 42
# TYPE windows_cpu_dpcs_total counter
# HELP windows_cpu_idle_break_events_total Total number of time processor was woken from idle
# TYPE windows_cpu_idle_break_events_total counter
# HELP windows_cpu_info Labelled CPU information as provided by Win32_Processor
# TYPE windows_cpu_info gauge
# HELP windows_cpu_info_core Number of cores per CPU
# TYPE windows_cpu_info_core gauge
# HELP windows_cpu_info_enabled_core Number of enabled cores per CPU
# TYPE windows_cpu_info_enabled_core gauge
# HELP windows_cpu_info_l2_cache_size Size of L2 cache per CPU
# TYPE windows_cpu_info_l2_cache_size gauge
# HELP windows_cpu_info_l3_cache_size Size of L3 cache per CPU
# TYPE windows_cpu_info_l3_cache_size gauge
# HELP windows_cpu_info_logical_processor Number of logical processors per CPU
# TYPE windows_cpu_info_logical_processor gauge
# HELP windows_cpu_info_thread Number of threads per CPU
# TYPE windows_cpu_info_thread gauge
# HELP windows_cpu_interrupts_total Total number of received and serviced hardware interrupts
# TYPE windows_cpu_interrupts_total counter
# HELP windows_cpu_parking_status Parking Status represents whether a processor is parked or not
@@ -38,11 +52,13 @@ test_alpha_total 42
# HELP windows_exporter_collector_success windows_exporter: Whether the collector was successful.
# TYPE windows_exporter_collector_success gauge
windows_exporter_collector_success{collector="cpu"} 1
windows_exporter_collector_success{collector="cpu_info"} 1
windows_exporter_collector_success{collector="cs"} 1
windows_exporter_collector_success{collector="logical_disk"} 1
windows_exporter_collector_success{collector="physical_disk"} 1
windows_exporter_collector_success{collector="net"} 1
windows_exporter_collector_success{collector="os"} 1
windows_exporter_collector_success{collector="process"} 1
windows_exporter_collector_success{collector="scheduled_task"} 1
windows_exporter_collector_success{collector="service"} 1
windows_exporter_collector_success{collector="system"} 1
@@ -50,11 +66,13 @@ windows_exporter_collector_success{collector="textfile"} 1
# HELP windows_exporter_collector_timeout windows_exporter: Whether the collector timed out.
# TYPE windows_exporter_collector_timeout gauge
windows_exporter_collector_timeout{collector="cpu"} 0
windows_exporter_collector_timeout{collector="cpu_info"} 0
windows_exporter_collector_timeout{collector="cs"} 0
windows_exporter_collector_timeout{collector="logical_disk"} 0
windows_exporter_collector_timeout{collector="physical_disk"} 0
windows_exporter_collector_timeout{collector="net"} 0
windows_exporter_collector_timeout{collector="os"} 0
windows_exporter_collector_timeout{collector="process"} 0
windows_exporter_collector_timeout{collector="scheduled_task"} 0
windows_exporter_collector_timeout{collector="service"} 0
windows_exporter_collector_timeout{collector="system"} 0

View File

@@ -18,14 +18,14 @@ mkdir $textfile_dir | Out-Null
Copy-Item 'e2e-textfile.prom' -Destination "$($textfile_dir)/e2e-textfile.prom"
# Omit dynamic collector information that will change after each run
-$skip_re = "^(go_|windows_exporter_build_info|windows_exporter_collector_duration_seconds|windows_exporter_perflib_snapshot_duration_seconds|process_|windows_textfile_mtime_seconds|windows_cpu|windows_cs|windows_logical_disk|windows_physical_disk|windows_net|windows_os|windows_service|windows_system|windows_textfile_mtime_seconds)"
+$skip_re = "^(go_|windows_exporter_build_info|windows_exporter_collector_duration_seconds|windows_exporter_perflib_snapshot_duration_seconds|process_|windows_textfile_mtime_seconds|windows_cpu|windows_cs|windows_logical_disk|windows_physical_disk|windows_net|windows_os|windows_process|windows_service|windows_system|windows_textfile_mtime_seconds)"
# Start process in background, awaiting HTTP requests.
# Use default collectors, port and address: http://localhost:9182/metrics
$exporter_proc = Start-Process `
-PassThru `
-FilePath ..\windows_exporter.exe `
--ArgumentList "--log.level=debug --web.disable-exporter-metrics --collectors.enabled=[defaults],textfile,scheduled_task --collector.scheduled_task.include=.*WinSAT --collector.textfile.directories=$($textfile_dir)" `
+-ArgumentList "--log.level=debug --web.disable-exporter-metrics --collectors.enabled=[defaults],cpu_info,textfile,process,scheduled_task --collector.process.include=explorer.exe --collector.scheduled_task.include=.*WinSAT --collector.textfile.directories=$($textfile_dir)" `
-WindowStyle Hidden `
-RedirectStandardOutput "$($temp_dir)/windows_exporter.log" `
-RedirectStandardError "$($temp_dir)/windows_exporter_error.log"

View File

@@ -93,7 +93,7 @@ $temp_dir = Join-Path $env:TEMP $(New-Guid) | ForEach-Object { mkdir $_ }
$exporter_proc = Start-Process `
-PassThru `
-FilePath ..\windows_exporter.exe `
--ArgumentList '--web.listen-address="127.0.0.1:9183" --log.level=debug' `
+-ArgumentList '--web.listen-address="127.0.0.1:9183" --log.level=debug --collectors.enabled=[defaults],cpu_info,textfile,process,scheduled_task'`
-WindowStyle Hidden `
-RedirectStandardOutput "$($temp_dir)/windows_exporter.log" `
-RedirectStandardError "$($temp_dir)/windows_exporter_error.log"