Mirror of https://github.com/prometheus-community/windows_exporter.git, synced 2026-02-18 02:36:36 +00:00
chore: Move private packages to internal (#1664)
@@ -1,12 +0,0 @@
package ad_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/ad"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, ad.Name, ad.NewWithFlags)
}
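For orientation, the benchmark above is an ordinary Go benchmark driven by the exporter's own testutils helper. Independent of that helper (whose internals are not part of this diff), the general pattern for benchmarking any prometheus.Collector is to register it and repeatedly gather its metrics. A minimal, self-contained sketch, using the client_golang Go collector purely as a stand-in:

package example_test

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

// BenchmarkGather registers a collector and measures repeated scrapes.
// This only illustrates the general registry/Gather pattern; it is not the
// exporter's FuncBenchmarkCollector helper.
func BenchmarkGather(b *testing.B) {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector())

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if _, err := reg.Gather(); err != nil {
			b.Fatal(err)
		}
	}
}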
@@ -1,359 +0,0 @@
|
||||
package ad
|
||||
|
||||
const (
|
||||
abANRPerSec = "AB ANR/sec"
|
||||
abBrowsesPerSec = "AB Browses/sec"
|
||||
abClientSessions = "AB Client Sessions"
|
||||
abMatchesPerSec = "AB Matches/sec"
|
||||
abPropertyReadsPerSec = "AB Property Reads/sec"
|
||||
abProxyLookupsPerSec = "AB Proxy Lookups/sec"
|
||||
abSearchesPerSec = "AB Searches/sec"
|
||||
approximateHighestDNT = "Approximate highest DNT"
|
||||
atqEstimatedQueueDelay = "ATQ Estimated Queue Delay"
|
||||
atqOutstandingQueuedRequests = "ATQ Outstanding Queued Requests"
|
||||
_ = "ATQ Queue Latency"
|
||||
atqRequestLatency = "ATQ Request Latency"
|
||||
atqThreadsLDAP = "ATQ Threads LDAP"
|
||||
atqThreadsOther = "ATQ Threads Other"
|
||||
atqThreadsTotal = "ATQ Threads Total"
|
||||
baseSearchesPerSec = "Base searches/sec"
|
||||
databaseAddsPerSec = "Database adds/sec"
|
||||
databaseDeletesPerSec = "Database deletes/sec"
|
||||
databaseModifiesPerSec = "Database modifys/sec"
|
||||
databaseRecyclesPerSec = "Database recycles/sec"
|
||||
digestBindsPerSec = "Digest Binds/sec"
|
||||
_ = "DirSync session throttling rate"
|
||||
_ = "DirSync sessions in progress"
|
||||
draHighestUSNCommittedHighPart = "DRA Highest USN Committed (High part)"
|
||||
draHighestUSNCommittedLowPart = "DRA Highest USN Committed (Low part)"
|
||||
draHighestUSNIssuedHighPart = "DRA Highest USN Issued (High part)"
|
||||
draHighestUSNIssuedLowPart = "DRA Highest USN Issued (Low part)"
|
||||
draInboundBytesCompressedBetweenSitesAfterCompressionSinceBoot = "DRA Inbound Bytes Compressed (Between Sites, After Compression) Since Boot"
|
||||
draInboundBytesCompressedBetweenSitesAfterCompressionPerSec = "DRA Inbound Bytes Compressed (Between Sites, After Compression)/sec"
|
||||
draInboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot = "DRA Inbound Bytes Compressed (Between Sites, Before Compression) Since Boot"
|
||||
draInboundBytesCompressedBetweenSitesBeforeCompressionPerSec = "DRA Inbound Bytes Compressed (Between Sites, Before Compression)/sec"
|
||||
draInboundBytesNotCompressedWithinSiteSinceBoot = "DRA Inbound Bytes Not Compressed (Within Site) Since Boot"
|
||||
draInboundBytesNotCompressedWithinSitePerSec = "DRA Inbound Bytes Not Compressed (Within Site)/sec"
|
||||
draInboundBytesTotalSinceBoot = "DRA Inbound Bytes Total Since Boot"
|
||||
draInboundBytesTotalPerSec = "DRA Inbound Bytes Total/sec"
|
||||
draInboundFullSyncObjectsRemaining = "DRA Inbound Full Sync Objects Remaining"
|
||||
draInboundLinkValueUpdatesRemainingInPacket = "DRA Inbound Link Value Updates Remaining in Packet"
|
||||
_ = "DRA Inbound Link Values/sec"
|
||||
draInboundObjectUpdatesRemainingInPacket = "DRA Inbound Object Updates Remaining in Packet"
|
||||
draInboundObjectsAppliedPerSec = "DRA Inbound Objects Applied/sec"
|
||||
draInboundObjectsFilteredPerSec = "DRA Inbound Objects Filtered/sec"
|
||||
draInboundObjectsPerSec = "DRA Inbound Objects/sec"
|
||||
draInboundPropertiesAppliedPerSec = "DRA Inbound Properties Applied/sec"
|
||||
draInboundPropertiesFilteredPerSec = "DRA Inbound Properties Filtered/sec"
|
||||
draInboundPropertiesTotalPerSec = "DRA Inbound Properties Total/sec"
|
||||
_ = "DRA Inbound Sync Link Deletion/sec"
|
||||
draInboundTotalUpdatesRemainingInPacket = "DRA Inbound Total Updates Remaining in Packet"
|
||||
draInboundValuesDNsOnlyPerSec = "DRA Inbound Values (DNs only)/sec"
|
||||
draInboundValuesTotalPerSec = "DRA Inbound Values Total/sec"
|
||||
_ = "DRA number of NC replication calls since boot"
|
||||
_ = "DRA number of successful NC replication calls since boot"
|
||||
draOutboundBytesCompressedBetweenSitesAfterCompressionSinceBoot = "DRA Outbound Bytes Compressed (Between Sites, After Compression) Since Boot"
|
||||
draOutboundBytesCompressedBetweenSitesAfterCompressionPerSec = "DRA Outbound Bytes Compressed (Between Sites, After Compression)/sec"
|
||||
draOutboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot = "DRA Outbound Bytes Compressed (Between Sites, Before Compression) Since Boot"
|
||||
draOutboundBytesCompressedBetweenSitesBeforeCompressionPerSec = "DRA Outbound Bytes Compressed (Between Sites, Before Compression)/sec"
|
||||
draOutboundBytesNotCompressedWithinSiteSinceBoot = "DRA Outbound Bytes Not Compressed (Within Site) Since Boot"
|
||||
draOutboundBytesNotCompressedWithinSitePerSec = "DRA Outbound Bytes Not Compressed (Within Site)/sec"
|
||||
draOutboundBytesTotalSinceBoot = "DRA Outbound Bytes Total Since Boot"
|
||||
draOutboundBytesTotalPerSec = "DRA Outbound Bytes Total/sec"
|
||||
draOutboundObjectsFilteredPerSec = "DRA Outbound Objects Filtered/sec"
|
||||
draOutboundObjectsPerSec = "DRA Outbound Objects/sec"
|
||||
draOutboundPropertiesPerSec = "DRA Outbound Properties/sec"
|
||||
draOutboundValuesDNsOnlyPerSec = "DRA Outbound Values (DNs only)/sec"
|
||||
draOutboundValuesTotalPerSec = "DRA Outbound Values Total/sec"
|
||||
draPendingReplicationOperations = "DRA Pending Replication Operations"
|
||||
draPendingReplicationSynchronizations = "DRA Pending Replication Synchronizations"
|
||||
draSyncFailuresOnSchemaMismatch = "DRA Sync Failures on Schema Mismatch"
|
||||
draSyncRequestsMade = "DRA Sync Requests Made"
|
||||
draSyncRequestsSuccessful = "DRA Sync Requests Successful"
|
||||
draThreadsGettingNCChanges = "DRA Threads Getting NC Changes"
|
||||
draThreadsGettingNCChangesHoldingSemaphore = "DRA Threads Getting NC Changes Holding Semaphore"
|
||||
_ = "DRA total number of Busy failures since boot"
|
||||
_ = "DRA total number of MissingParent failures since boot"
|
||||
_ = "DRA total number of NotEnoughAttrs/MissingObject failures since boot"
|
||||
_ = "DRA total number of Preempted failures since boot"
|
||||
_ = "DRA total time of applying replication package since boot"
|
||||
_ = "DRA total time of NC replication calls since boot"
|
||||
_ = "DRA total time of successful NC replication calls since boot"
|
||||
_ = "DRA total time of successfully applying replication package since boot"
|
||||
_ = "DRA total time on waiting async replication packages since boot"
|
||||
_ = "DRA total time on waiting sync replication packages since boot"
|
||||
dsPercentReadsFromDRA = "DS % Reads from DRA"
|
||||
dsPercentReadsFromKCC = "DS % Reads from KCC"
|
||||
dsPercentReadsFromLSA = "DS % Reads from LSA"
|
||||
dsPercentReadsFromNSPI = "DS % Reads from NSPI"
|
||||
dsPercentReadsFromNTDSAPI = "DS % Reads from NTDSAPI"
|
||||
dsPercentReadsFromSAM = "DS % Reads from SAM"
|
||||
dsPercentReadsOther = "DS % Reads Other"
|
||||
dsPercentSearchesFromDRA = "DS % Searches from DRA"
|
||||
dsPercentSearchesFromKCC = "DS % Searches from KCC"
|
||||
dsPercentSearchesFromLDAP = "DS % Searches from LDAP"
|
||||
dsPercentSearchesFromLSA = "DS % Searches from LSA"
|
||||
dsPercentSearchesFromNSPI = "DS % Searches from NSPI"
|
||||
dsPercentSearchesFromNTDSAPI = "DS % Searches from NTDSAPI"
|
||||
dsPercentSearchesFromSAM = "DS % Searches from SAM"
|
||||
dsPercentSearchesOther = "DS % Searches Other"
|
||||
dsPercentWritesFromDRA = "DS % Writes from DRA"
|
||||
dsPercentWritesFromKCC = "DS % Writes from KCC"
|
||||
dsPercentWritesFromLDAP = "DS % Writes from LDAP"
|
||||
dsPercentWritesFromLSA = "DS % Writes from LSA"
|
||||
dsPercentWritesFromNSPI = "DS % Writes from NSPI"
|
||||
dsPercentWritesFromNTDSAPI = "DS % Writes from NTDSAPI"
|
||||
dsPercentWritesFromSAM = "DS % Writes from SAM"
|
||||
dsPercentWritesOther = "DS % Writes Other"
|
||||
dsClientBindsPerSec = "DS Client Binds/sec"
|
||||
dsClientNameTranslationsPerSec = "DS Client Name Translations/sec"
|
||||
dsDirectoryReadsPerSec = "DS Directory Reads/sec"
|
||||
dsDirectorySearchesPerSec = "DS Directory Searches/sec"
|
||||
dsDirectoryWritesPerSec = "DS Directory Writes/sec"
|
||||
dsMonitorListSize = "DS Monitor List Size"
|
||||
dsNameCacheHitRate = "DS Name Cache hit rate"
|
||||
dsNotifyQueueSize = "DS Notify Queue Size"
|
||||
dsSearchSubOperationsPerSec = "DS Search sub-operations/sec"
|
||||
dsSecurityDescriptorPropagationsEvents = "DS Security Descriptor Propagations Events"
|
||||
dsSecurityDescriptorPropagatorAverageExclusionTime = "DS Security Descriptor Propagator Average Exclusion Time"
|
||||
dsSecurityDescriptorPropagatorRuntimeQueue = "DS Security Descriptor Propagator Runtime Queue"
|
||||
dsSecurityDescriptorSubOperationsPerSec = "DS Security Descriptor sub-operations/sec"
|
||||
dsServerBindsPerSec = "DS Server Binds/sec"
|
||||
dsServerNameTranslationsPerSec = "DS Server Name Translations/sec"
|
||||
dsThreadsInUse = "DS Threads in Use"
|
||||
_ = "Error eventlogs since boot"
|
||||
_ = "Error events since boot"
|
||||
externalBindsPerSec = "External Binds/sec"
|
||||
fastBindsPerSec = "Fast Binds/sec"
|
||||
_ = "Fatal events since boot"
|
||||
_ = "Info eventlogs since boot"
|
||||
ldapActiveThreads = "LDAP Active Threads"
|
||||
_ = "LDAP Add Operations"
|
||||
_ = "LDAP Add Operations/sec"
|
||||
_ = "LDAP batch slots available"
|
||||
ldapBindTime = "LDAP Bind Time"
|
||||
_ = "LDAP busy retries"
|
||||
_ = "LDAP busy retries/sec"
|
||||
ldapClientSessions = "LDAP Client Sessions"
|
||||
ldapClosedConnectionsPerSec = "LDAP Closed Connections/sec"
|
||||
_ = "LDAP Delete Operations"
|
||||
_ = "LDAP Delete Operations/sec"
|
||||
_ = "LDAP Modify DN Operations"
|
||||
_ = "LDAP Modify DN Operations/sec"
|
||||
_ = "LDAP Modify Operations"
|
||||
_ = "LDAP Modify Operations/sec"
|
||||
ldapNewConnectionsPerSec = "LDAP New Connections/sec"
|
||||
ldapNewSSLConnectionsPerSec = "LDAP New SSL Connections/sec"
|
||||
_ = "LDAP Outbound Bytes"
|
||||
_ = "LDAP Outbound Bytes/sec"
|
||||
_ = "LDAP Page Search Cache entries count"
|
||||
_ = "LDAP Page Search Cache size"
|
||||
ldapSearchesPerSec = "LDAP Searches/sec"
|
||||
ldapSuccessfulBindsPerSec = "LDAP Successful Binds/sec"
|
||||
_ = "LDAP Threads Sleeping on BUSY"
|
||||
ldapUDPOperationsPerSec = "LDAP UDP operations/sec"
|
||||
ldapWritesPerSec = "LDAP Writes/sec"
|
||||
linkValuesCleanedPerSec = "Link Values Cleaned/sec"
|
||||
_ = "Links added"
|
||||
_ = "Links added/sec"
|
||||
_ = "Links visited"
|
||||
_ = "Links visited/sec"
|
||||
_ = "Logical link deletes"
|
||||
_ = "Logical link deletes/sec"
|
||||
negotiatedBindsPerSec = "Negotiated Binds/sec"
|
||||
ntlmBindsPerSec = "NTLM Binds/sec"
|
||||
_ = "Objects returned"
|
||||
_ = "Objects returned/sec"
|
||||
_ = "Objects visited"
|
||||
_ = "Objects visited/sec"
|
||||
oneLevelSearchesPerSec = "Onelevel searches/sec"
|
||||
_ = "PDC failed password update notifications"
|
||||
_ = "PDC password update notifications/sec"
|
||||
_ = "PDC successful password update notifications"
|
||||
phantomsCleanedPerSec = "Phantoms Cleaned/sec"
|
||||
phantomsVisitedPerSec = "Phantoms Visited/sec"
|
||||
_ = "Physical link deletes"
|
||||
_ = "Physical link deletes/sec"
|
||||
_ = "Replicate Single Object operations"
|
||||
_ = "Replicate Single Object operations/sec"
|
||||
_ = "RID Pool invalidations since boot"
|
||||
_ = "RID Pool request failures since boot"
|
||||
_ = "RID Pool request successes since boot"
|
||||
samAccountGroupEvaluationLatency = "SAM Account Group Evaluation Latency"
|
||||
samDisplayInformationQueriesPerSec = "SAM Display Information Queries/sec"
|
||||
samDomainLocalGroupMembershipEvaluationsPerSec = "SAM Domain Local Group Membership Evaluations/sec"
|
||||
samEnumerationsPerSec = "SAM Enumerations/sec"
|
||||
samGCEvaluationsPerSec = "SAM GC Evaluations/sec"
|
||||
samGlobalGroupMembershipEvaluationsPerSec = "SAM Global Group Membership Evaluations/sec"
|
||||
samMachineCreationAttemptsPerSec = "SAM Machine Creation Attempts/sec"
|
||||
samMembershipChangesPerSec = "SAM Membership Changes/sec"
|
||||
samNonTransitiveMembershipEvaluationsPerSec = "SAM Non-Transitive Membership Evaluations/sec"
|
||||
samPasswordChangesPerSec = "SAM Password Changes/sec"
|
||||
samResourceGroupEvaluationLatency = "SAM Resource Group Evaluation Latency"
|
||||
samSuccessfulComputerCreationsPerSecIncludesAllRequests = "SAM Successful Computer Creations/sec: Includes all requests"
|
||||
samSuccessfulUserCreationsPerSec = "SAM Successful User Creations/sec"
|
||||
samTransitiveMembershipEvaluationsPerSec = "SAM Transitive Membership Evaluations/sec"
|
||||
samUniversalGroupMembershipEvaluationsPerSec = "SAM Universal Group Membership Evaluations/sec"
|
||||
samUserCreationAttemptsPerSec = "SAM User Creation Attempts/sec"
|
||||
simpleBindsPerSec = "Simple Binds/sec"
|
||||
subtreeSearchesPerSec = "Subtree searches/sec"
|
||||
tombstonesGarbageCollectedPerSec = "Tombstones Garbage Collected/sec"
|
||||
tombstonesVisitedPerSec = "Tombstones Visited/sec"
|
||||
transitiveOperationsMillisecondsRun = "Transitive operations milliseconds run"
|
||||
transitiveOperationsPerSec = "Transitive operations/sec"
|
||||
transitiveSubOperationsPerSec = "Transitive suboperations/sec"
|
||||
_ = "Warning eventlogs since boot"
|
||||
_ = "Warning events since boot"
|
||||
)
|
||||
|
||||
// Win32_PerfRawData_DirectoryServices_DirectoryServices docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/ms803980.aspx
|
||||
type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
|
||||
Name string
|
||||
|
||||
ABANRPersec uint32
|
||||
ABBrowsesPersec uint32
|
||||
ABClientSessions uint32
|
||||
ABMatchesPersec uint32
|
||||
ABPropertyReadsPersec uint32
|
||||
ABProxyLookupsPersec uint32
|
||||
ABSearchesPersec uint32
|
||||
ApproximatehighestDNT uint32
|
||||
ATQEstimatedQueueDelay uint32
|
||||
ATQOutstandingQueuedRequests uint32
|
||||
ATQRequestLatency uint32
|
||||
ATQThreadsLDAP uint32
|
||||
ATQThreadsOther uint32
|
||||
ATQThreadsTotal uint32
|
||||
BasesearchesPersec uint32
|
||||
DatabaseaddsPersec uint32
|
||||
DatabasedeletesPersec uint32
|
||||
DatabasemodifysPersec uint32
|
||||
DatabaserecyclesPersec uint32
|
||||
DigestBindsPersec uint32
|
||||
DRAHighestUSNCommittedHighpart uint64
|
||||
DRAHighestUSNCommittedLowpart uint64
|
||||
DRAHighestUSNIssuedHighpart uint64
|
||||
DRAHighestUSNIssuedLowpart uint64
|
||||
DRAInboundBytesCompressedBetweenSitesAfterCompressionPersec uint32
|
||||
DRAInboundBytesCompressedBetweenSitesAfterCompressionSinceBoot uint32
|
||||
DRAInboundBytesCompressedBetweenSitesBeforeCompressionPersec uint32
|
||||
DRAInboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot uint32
|
||||
DRAInboundBytesNotCompressedWithinSitePersec uint32
|
||||
DRAInboundBytesNotCompressedWithinSiteSinceBoot uint32
|
||||
DRAInboundBytesTotalPersec uint32
|
||||
DRAInboundBytesTotalSinceBoot uint32
|
||||
DRAInboundFullSyncObjectsRemaining uint32
|
||||
DRAInboundLinkValueUpdatesRemaininginPacket uint32
|
||||
DRAInboundObjectsAppliedPersec uint32
|
||||
DRAInboundObjectsFilteredPersec uint32
|
||||
DRAInboundObjectsPersec uint32
|
||||
DRAInboundObjectUpdatesRemaininginPacket uint32
|
||||
DRAInboundPropertiesAppliedPersec uint32
|
||||
DRAInboundPropertiesFilteredPersec uint32
|
||||
DRAInboundPropertiesTotalPersec uint32
|
||||
DRAInboundTotalUpdatesRemaininginPacket uint32
|
||||
DRAInboundValuesDNsonlyPersec uint32
|
||||
DRAInboundValuesTotalPersec uint32
|
||||
DRAOutboundBytesCompressedBetweenSitesAfterCompressionPersec uint32
|
||||
DRAOutboundBytesCompressedBetweenSitesAfterCompressionSinceBoot uint32
|
||||
DRAOutboundBytesCompressedBetweenSitesBeforeCompressionPersec uint32
|
||||
DRAOutboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot uint32
|
||||
DRAOutboundBytesNotCompressedWithinSitePersec uint32
|
||||
DRAOutboundBytesNotCompressedWithinSiteSinceBoot uint32
|
||||
DRAOutboundBytesTotalPersec uint32
|
||||
DRAOutboundBytesTotalSinceBoot uint32
|
||||
DRAOutboundObjectsFilteredPersec uint32
|
||||
DRAOutboundObjectsPersec uint32
|
||||
DRAOutboundPropertiesPersec uint32
|
||||
DRAOutboundValuesDNsonlyPersec uint32
|
||||
DRAOutboundValuesTotalPersec uint32
|
||||
DRAPendingReplicationOperations uint32
|
||||
DRAPendingReplicationSynchronizations uint32
|
||||
DRASyncFailuresonSchemaMismatch uint32
|
||||
DRASyncRequestsMade uint32
|
||||
DRASyncRequestsSuccessful uint32
|
||||
DRAThreadsGettingNCChanges uint32
|
||||
DRAThreadsGettingNCChangesHoldingSemaphore uint32
|
||||
DSClientBindsPersec uint32
|
||||
DSClientNameTranslationsPersec uint32
|
||||
DSDirectoryReadsPersec uint32
|
||||
DSDirectorySearchesPersec uint32
|
||||
DSDirectoryWritesPersec uint32
|
||||
DSMonitorListSize uint32
|
||||
DSNameCachehitrate uint32
|
||||
DSNameCachehitrate_Base uint32
|
||||
DSNotifyQueueSize uint32
|
||||
DSPercentReadsfromDRA uint32
|
||||
DSPercentReadsfromKCC uint32
|
||||
DSPercentReadsfromLSA uint32
|
||||
DSPercentReadsfromNSPI uint32
|
||||
DSPercentReadsfromNTDSAPI uint32
|
||||
DSPercentReadsfromSAM uint32
|
||||
DSPercentReadsOther uint32
|
||||
DSPercentSearchesfromDRA uint32
|
||||
DSPercentSearchesfromKCC uint32
|
||||
DSPercentSearchesfromLDAP uint32
|
||||
DSPercentSearchesfromLSA uint32
|
||||
DSPercentSearchesfromNSPI uint32
|
||||
DSPercentSearchesfromNTDSAPI uint32
|
||||
DSPercentSearchesfromSAM uint32
|
||||
DSPercentSearchesOther uint32
|
||||
DSPercentWritesfromDRA uint32
|
||||
DSPercentWritesfromKCC uint32
|
||||
DSPercentWritesfromLDAP uint32
|
||||
DSPercentWritesfromLSA uint32
|
||||
DSPercentWritesfromNSPI uint32
|
||||
DSPercentWritesfromNTDSAPI uint32
|
||||
DSPercentWritesfromSAM uint32
|
||||
DSPercentWritesOther uint32
|
||||
DSSearchsuboperationsPersec uint32
|
||||
DSSecurityDescriptorPropagationsEvents uint32
|
||||
DSSecurityDescriptorPropagatorAverageExclusionTime uint32
|
||||
DSSecurityDescriptorPropagatorRuntimeQueue uint32
|
||||
DSSecurityDescriptorsuboperationsPersec uint32
|
||||
DSServerBindsPersec uint32
|
||||
DSServerNameTranslationsPersec uint32
|
||||
DSThreadsinUse uint32
|
||||
ExternalBindsPersec uint32
|
||||
FastBindsPersec uint32
|
||||
LDAPActiveThreads uint32
|
||||
LDAPBindTime uint32
|
||||
LDAPClientSessions uint32
|
||||
LDAPClosedConnectionsPersec uint32
|
||||
LDAPNewConnectionsPersec uint32
|
||||
LDAPNewSSLConnectionsPersec uint32
|
||||
LDAPSearchesPersec uint32
|
||||
LDAPSuccessfulBindsPersec uint32
|
||||
LDAPUDPoperationsPersec uint32
|
||||
LDAPWritesPersec uint32
|
||||
LinkValuesCleanedPersec uint32
|
||||
NegotiatedBindsPersec uint32
|
||||
NTLMBindsPersec uint32
|
||||
OnelevelsearchesPersec uint32
|
||||
PhantomsCleanedPersec uint32
|
||||
PhantomsVisitedPersec uint32
|
||||
SAMAccountGroupEvaluationLatency uint32
|
||||
SAMDisplayInformationQueriesPersec uint32
|
||||
SAMDomainLocalGroupMembershipEvaluationsPersec uint32
|
||||
SAMEnumerationsPersec uint32
|
||||
SAMGCEvaluationsPersec uint32
|
||||
SAMGlobalGroupMembershipEvaluationsPersec uint32
|
||||
SAMMachineCreationAttemptsPersec uint32
|
||||
SAMMembershipChangesPersec uint32
|
||||
SAMNonTransitiveMembershipEvaluationsPersec uint32
|
||||
SAMPasswordChangesPersec uint32
|
||||
SAMResourceGroupEvaluationLatency uint32
|
||||
SAMSuccessfulComputerCreationsPersecIncludesallrequests uint32
|
||||
SAMSuccessfulUserCreationsPersec uint32
|
||||
SAMTransitiveMembershipEvaluationsPersec uint32
|
||||
SAMUniversalGroupMembershipEvaluationsPersec uint32
|
||||
SAMUserCreationAttemptsPersec uint32
|
||||
SimpleBindsPersec uint32
|
||||
SubtreesearchesPersec uint32
|
||||
TombstonesGarbageCollectedPersec uint32
|
||||
TombstonesVisitedPersec uint32
|
||||
Transitiveoperationsmillisecondsrun uint32
|
||||
TransitiveoperationsPersec uint32
|
||||
TransitivesuboperationsPersec uint32
|
||||
}
|
||||
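The Win32_PerfRawData_DirectoryServices_DirectoryServices struct above mirrors a WMI performance class, as the linked MSDN documentation describes. As a hedged, standalone illustration (not the exporter's own collection path), structs shaped like this are conventionally filled through the yusufpapurcu/wmi package that the collectors in this commit import; the query is built from the struct's field names, and the two-field struct below is only a trimmed example:

//go:build windows

package main

import (
	"fmt"

	"github.com/yusufpapurcu/wmi"
)

// Trimmed example struct: only two of the many fields defined in the full type above.
type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
	Name               string
	LDAPClientSessions uint32
}

func main() {
	var dst []Win32_PerfRawData_DirectoryServices_DirectoryServices

	// CreateQuery builds "SELECT Name, LDAPClientSessions FROM Win32_PerfRawData_DirectoryServices_DirectoryServices".
	q := wmi.CreateQuery(&dst, "")
	if err := wmi.Query(q, &dst); err != nil {
		fmt.Println(err)
		return
	}

	for _, d := range dst {
		fmt.Println(d.Name, d.LDAPClientSessions)
	}
}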
@@ -1,398 +0,0 @@
//go:build windows

package adcs

import (
	"errors"
	"fmt"
	"log/slog"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/perfdata"
	"github.com/prometheus-community/windows_exporter/pkg/perflib"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus-community/windows_exporter/pkg/utils"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
)

const Name = "adcs"

type Config struct{}

var ConfigDefaults = Config{}

type Collector struct {
	config Config

	perfDataCollector *perfdata.Collector

	challengeResponseProcessingTime *prometheus.Desc
	challengeResponsesPerSecond *prometheus.Desc
	failedRequestsPerSecond *prometheus.Desc
	issuedRequestsPerSecond *prometheus.Desc
	pendingRequestsPerSecond *prometheus.Desc
	requestCryptographicSigningTime *prometheus.Desc
	requestPolicyModuleProcessingTime *prometheus.Desc
	requestProcessingTime *prometheus.Desc
	requestsPerSecond *prometheus.Desc
	retrievalProcessingTime *prometheus.Desc
	retrievalsPerSecond *prometheus.Desc
	signedCertificateTimestampListProcessingTime *prometheus.Desc
	signedCertificateTimestampListsPerSecond *prometheus.Desc
}

func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	c := &Collector{
		config: *config,
	}

	return c
}

func NewWithFlags(_ *kingpin.Application) *Collector {
	return &Collector{}
}

func (c *Collector) GetName() string {
	return Name
}

func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	if utils.PDHEnabled() {
		return []string{}, nil
	}

	return []string{"Certification Authority"}, nil
}

func (c *Collector) Close(_ *slog.Logger) error {
	return nil
}

func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
	if utils.PDHEnabled() {
		counters := []string{
			requestsPerSecond,
			requestProcessingTime,
			retrievalsPerSecond,
			retrievalProcessingTime,
			failedRequestsPerSecond,
			issuedRequestsPerSecond,
			pendingRequestsPerSecond,
			requestCryptographicSigningTime,
			requestPolicyModuleProcessingTime,
			challengeResponsesPerSecond,
			challengeResponseProcessingTime,
			signedCertificateTimestampListsPerSecond,
			signedCertificateTimestampListProcessingTime,
		}

		var err error

		c.perfDataCollector, err = perfdata.NewCollector("Certification Authority", []string{"*"}, counters)
		if err != nil {
			return fmt.Errorf("failed to create Certification Authority collector: %w", err)
		}
	}

	c.requestsPerSecond = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
		"Total certificate requests processed",
		[]string{"cert_template"},
		nil,
	)
	c.requestProcessingTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "request_processing_time_seconds"),
		"Last time elapsed for certificate requests",
		[]string{"cert_template"},
		nil,
	)
	c.retrievalsPerSecond = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "retrievals_total"),
		"Total certificate retrieval requests processed",
		[]string{"cert_template"},
		nil,
	)
	c.retrievalProcessingTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "retrievals_processing_time_seconds"),
		"Last time elapsed for certificate retrieval request",
		[]string{"cert_template"},
		nil,
	)
	c.failedRequestsPerSecond = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "failed_requests_total"),
		"Total failed certificate requests processed",
		[]string{"cert_template"},
		nil,
	)
	c.issuedRequestsPerSecond = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "issued_requests_total"),
		"Total issued certificate requests processed",
		[]string{"cert_template"},
		nil,
	)
	c.pendingRequestsPerSecond = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "pending_requests_total"),
		"Total pending certificate requests processed",
		[]string{"cert_template"},
		nil,
	)
	c.requestCryptographicSigningTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "request_cryptographic_signing_time_seconds"),
		"Last time elapsed for signing operation request",
		[]string{"cert_template"},
		nil,
	)
	c.requestPolicyModuleProcessingTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "request_policy_module_processing_time_seconds"),
		"Last time elapsed for policy module processing request",
		[]string{"cert_template"},
		nil,
	)
	c.challengeResponsesPerSecond = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "challenge_responses_total"),
		"Total certificate challenge responses processed",
		[]string{"cert_template"},
		nil,
	)
	c.challengeResponseProcessingTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "challenge_response_processing_time_seconds"),
		"Last time elapsed for challenge response",
		[]string{"cert_template"},
		nil,
	)
	c.signedCertificateTimestampListsPerSecond = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "signed_certificate_timestamp_lists_total"),
		"Total Signed Certificate Timestamp Lists processed",
		[]string{"cert_template"},
		nil,
	)
	c.signedCertificateTimestampListProcessingTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "signed_certificate_timestamp_list_processing_time_seconds"),
		"Last time elapsed for Signed Certificate Timestamp List",
		[]string{"cert_template"},
		nil,
	)

	return nil
}

func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	if utils.PDHEnabled() {
		return c.collectPDH(ch)
	}

	logger = logger.With(slog.String("collector", Name))
	if err := c.collectADCSCounters(ctx, logger, ch); err != nil {
		logger.Error("failed collecting ADCS metrics",
			slog.Any("err", err),
		)

		return err
	}

	return nil
}

func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	dst := make([]perflibADCS, 0)

	if _, ok := ctx.PerfObjects["Certification Authority"]; !ok {
		return errors.New("perflib did not contain an entry for Certification Authority")
	}

	err := perflib.UnmarshalObject(ctx.PerfObjects["Certification Authority"], &dst, logger)
	if err != nil {
		return err
	}

	if len(dst) == 0 {
		return errors.New("perflib query for Certification Authority (ADCS) returned empty result set")
	}

	for _, d := range dst {
		if d.Name == "" {
			continue
		}

		ch <- prometheus.MustNewConstMetric(
			c.requestsPerSecond,
			prometheus.CounterValue,
			d.RequestsPerSecond,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.requestProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(d.RequestProcessingTime),
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.retrievalsPerSecond,
			prometheus.CounterValue,
			d.RetrievalsPerSecond,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.retrievalProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(d.RetrievalProcessingTime),
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.failedRequestsPerSecond,
			prometheus.CounterValue,
			d.FailedRequestsPerSecond,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.issuedRequestsPerSecond,
			prometheus.CounterValue,
			d.IssuedRequestsPerSecond,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.pendingRequestsPerSecond,
			prometheus.CounterValue,
			d.PendingRequestsPerSecond,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.requestCryptographicSigningTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(d.RequestCryptographicSigningTime),
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.requestPolicyModuleProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(d.RequestPolicyModuleProcessingTime),
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.challengeResponsesPerSecond,
			prometheus.CounterValue,
			d.ChallengeResponsesPerSecond,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.challengeResponseProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(d.ChallengeResponseProcessingTime),
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.signedCertificateTimestampListsPerSecond,
			prometheus.CounterValue,
			d.SignedCertificateTimestampListsPerSecond,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.signedCertificateTimestampListProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(d.SignedCertificateTimestampListProcessingTime),
			d.Name,
		)
	}

	return nil
}

func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
	data, err := c.perfDataCollector.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect Certification Authority (ADCS) metrics: %w", err)
	}

	if len(data) == 0 {
		return errors.New("perflib query for Certification Authority (ADCS) returned empty result set")
	}

	for name, adcsData := range data {
		ch <- prometheus.MustNewConstMetric(
			c.requestsPerSecond,
			prometheus.CounterValue,
			adcsData[requestsPerSecond].FirstValue,
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.requestProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(adcsData[requestProcessingTime].FirstValue),
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.retrievalsPerSecond,
			prometheus.CounterValue,
			adcsData[retrievalsPerSecond].FirstValue,
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.retrievalProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(adcsData[retrievalProcessingTime].FirstValue),
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.failedRequestsPerSecond,
			prometheus.CounterValue,
			adcsData[failedRequestsPerSecond].FirstValue,
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.issuedRequestsPerSecond,
			prometheus.CounterValue,
			adcsData[issuedRequestsPerSecond].FirstValue,
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.pendingRequestsPerSecond,
			prometheus.CounterValue,
			adcsData[pendingRequestsPerSecond].FirstValue,
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.requestCryptographicSigningTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(adcsData[requestCryptographicSigningTime].FirstValue),
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.requestPolicyModuleProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(adcsData[requestPolicyModuleProcessingTime].FirstValue),
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.challengeResponsesPerSecond,
			prometheus.CounterValue,
			adcsData[challengeResponsesPerSecond].FirstValue,
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.challengeResponseProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(adcsData[challengeResponseProcessingTime].FirstValue),
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.signedCertificateTimestampListsPerSecond,
			prometheus.CounterValue,
			adcsData[signedCertificateTimestampListsPerSecond].FirstValue,
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.signedCertificateTimestampListProcessingTime,
			prometheus.GaugeValue,
			utils.MilliSecToSec(adcsData[signedCertificateTimestampListProcessingTime].FirstValue),
			name,
		)
	}

	return nil
}
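One detail worth noting from the two collection paths above: the processing-time counters arrive from Windows in milliseconds, and the collector publishes them as *_seconds gauges via utils.MilliSecToSec. That helper lives in pkg/utils and is not part of this diff; the conversion it is assumed to perform is simply a millisecond-to-second division, sketched here with a hypothetical stand-in:

package utilsketch

// milliSecToSec is a hypothetical stand-in for utils.MilliSecToSec;
// the assumption is a plain milliseconds-to-seconds conversion.
func milliSecToSec(t float64) float64 {
	return t / 1000
}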
@@ -1,12 +0,0 @@
package adcs_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/adcs"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, adcs.Name, adcs.NewWithFlags)
}
@@ -1,34 +0,0 @@
package adcs

const (
	challengeResponseProcessingTime = "Challenge Response processing time (ms)"
	challengeResponsesPerSecond = "Challenge Responses/sec"
	failedRequestsPerSecond = "Failed Requests/sec"
	issuedRequestsPerSecond = "Issued Requests/sec"
	pendingRequestsPerSecond = "Pending Requests/sec"
	requestCryptographicSigningTime = "Request cryptographic signing time (ms)"
	requestPolicyModuleProcessingTime = "Request policy module processing time (ms)"
	requestProcessingTime = "Request processing time (ms)"
	requestsPerSecond = "Requests/sec"
	retrievalProcessingTime = "Retrieval processing time (ms)"
	retrievalsPerSecond = "Retrievals/sec"
	signedCertificateTimestampListProcessingTime = "Signed Certificate Timestamp List processing time (ms)"
	signedCertificateTimestampListsPerSecond = "Signed Certificate Timestamp Lists/sec"
)

type perflibADCS struct {
	Name string
	RequestsPerSecond float64 `perflib:"Requests/sec"`
	RequestProcessingTime float64 `perflib:"Request processing time (ms)"`
	RetrievalsPerSecond float64 `perflib:"Retrievals/sec"`
	RetrievalProcessingTime float64 `perflib:"Retrieval processing time (ms)"`
	FailedRequestsPerSecond float64 `perflib:"Failed Requests/sec"`
	IssuedRequestsPerSecond float64 `perflib:"Issued Requests/sec"`
	PendingRequestsPerSecond float64 `perflib:"Pending Requests/sec"`
	RequestCryptographicSigningTime float64 `perflib:"Request cryptographic signing time (ms)"`
	RequestPolicyModuleProcessingTime float64 `perflib:"Request policy module processing time (ms)"`
	ChallengeResponsesPerSecond float64 `perflib:"Challenge Responses/sec"`
	ChallengeResponseProcessingTime float64 `perflib:"Challenge Response processing time (ms)"`
	SignedCertificateTimestampListsPerSecond float64 `perflib:"Signed Certificate Timestamp Lists/sec"`
	SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"`
}
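The perflib struct tags above are what connect the counter names in the const block to the fields that collectADCSCounters reads. The real mapping is done by pkg/perflib.UnmarshalObject, which is not shown in this diff; the following standalone sketch only illustrates the general tag-driven technique (the fill helper and row type are hypothetical, not the exporter's implementation):

package main

import (
	"fmt"
	"reflect"
)

type row struct {
	RequestsPerSecond     float64 `perflib:"Requests/sec"`
	RequestProcessingTime float64 `perflib:"Request processing time (ms)"`
}

// fill copies counter values into dst's fields by matching each field's
// `perflib` tag against the counter name; dst must be a pointer to a struct.
func fill(dst interface{}, counters map[string]float64) {
	v := reflect.ValueOf(dst).Elem()
	t := v.Type()

	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("perflib")
		if value, ok := counters[tag]; ok {
			v.Field(i).SetFloat(value)
		}
	}
}

func main() {
	var r row

	fill(&r, map[string]float64{
		"Requests/sec":                 42,
		"Request processing time (ms)": 7,
	})

	fmt.Printf("%+v\n", r)
}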
@@ -1,984 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package adfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"math"
|
||||
"slices"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/utils"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "adfs"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
perfDataCollector *perfdata.Collector
|
||||
|
||||
adLoginConnectionFailures *prometheus.Desc
|
||||
artifactDBFailures *prometheus.Desc
|
||||
avgArtifactDBQueryTime *prometheus.Desc
|
||||
avgConfigDBQueryTime *prometheus.Desc
|
||||
certificateAuthentications *prometheus.Desc
|
||||
configDBFailures *prometheus.Desc
|
||||
deviceAuthentications *prometheus.Desc
|
||||
externalAuthenticationFailures *prometheus.Desc
|
||||
externalAuthentications *prometheus.Desc
|
||||
extranetAccountLockouts *prometheus.Desc
|
||||
federatedAuthentications *prometheus.Desc
|
||||
federationMetadataRequests *prometheus.Desc
|
||||
oAuthAuthZRequests *prometheus.Desc
|
||||
oAuthClientAuthentications *prometheus.Desc
|
||||
oAuthClientAuthenticationsFailures *prometheus.Desc
|
||||
oAuthClientCredentialsRequestFailures *prometheus.Desc
|
||||
oAuthClientCredentialsRequests *prometheus.Desc
|
||||
oAuthClientPrivateKeyJwtAuthenticationFailures *prometheus.Desc
|
||||
oAuthClientPrivateKeyJwtAuthentications *prometheus.Desc
|
||||
oAuthClientSecretBasicAuthenticationFailures *prometheus.Desc
|
||||
oAuthClientSecretBasicAuthentications *prometheus.Desc
|
||||
oAuthClientSecretPostAuthenticationFailures *prometheus.Desc
|
||||
oAuthClientSecretPostAuthentications *prometheus.Desc
|
||||
oAuthClientWindowsIntegratedAuthenticationFailures *prometheus.Desc
|
||||
oAuthClientWindowsIntegratedAuthentications *prometheus.Desc
|
||||
oAuthLogonCertificateRequestFailures *prometheus.Desc
|
||||
oAuthLogonCertificateTokenRequests *prometheus.Desc
|
||||
oAuthPasswordGrantRequestFailures *prometheus.Desc
|
||||
oAuthPasswordGrantRequests *prometheus.Desc
|
||||
oAuthTokenRequests *prometheus.Desc
|
||||
passiveRequests *prometheus.Desc
|
||||
passportAuthentications *prometheus.Desc
|
||||
passwordChangeFailed *prometheus.Desc
|
||||
passwordChangeSucceeded *prometheus.Desc
|
||||
samlPTokenRequests *prometheus.Desc
|
||||
ssoAuthenticationFailures *prometheus.Desc
|
||||
ssoAuthentications *prometheus.Desc
|
||||
tokenRequests *prometheus.Desc
|
||||
upAuthenticationFailures *prometheus.Desc
|
||||
upAuthentications *prometheus.Desc
|
||||
windowsIntegratedAuthentications *prometheus.Desc
|
||||
wsFedTokenRequests *prometheus.Desc
|
||||
wsTrustTokenRequests *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
if utils.PDHEnabled() {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return []string{"AD FS"}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
if utils.PDHEnabled() {
|
||||
counters := []string{
|
||||
adLoginConnectionFailures,
|
||||
certificateAuthentications,
|
||||
deviceAuthentications,
|
||||
extranetAccountLockouts,
|
||||
federatedAuthentications,
|
||||
passportAuthentications,
|
||||
passiveRequests,
|
||||
passwordChangeFailed,
|
||||
passwordChangeSucceeded,
|
||||
tokenRequests,
|
||||
windowsIntegratedAuthentications,
|
||||
oAuthAuthZRequests,
|
||||
oAuthClientAuthentications,
|
||||
oAuthClientAuthenticationFailures,
|
||||
oAuthClientCredentialRequestFailures,
|
||||
oAuthClientCredentialRequests,
|
||||
oAuthClientPrivateKeyJWTAuthenticationFailures,
|
||||
oAuthClientPrivateKeyJWTAuthentications,
|
||||
oAuthClientBasicAuthenticationFailures,
|
||||
oAuthClientBasicAuthentications,
|
||||
oAuthClientSecretPostAuthenticationFailures,
|
||||
oAuthClientSecretPostAuthentications,
|
||||
oAuthClientWindowsAuthenticationFailures,
|
||||
oAuthClientWindowsAuthentications,
|
||||
oAuthLogonCertRequestFailures,
|
||||
oAuthLogonCertTokenRequests,
|
||||
oAuthPasswordGrantRequestFailures,
|
||||
oAuthPasswordGrantRequests,
|
||||
oAuthTokenRequests,
|
||||
samlPTokenRequests,
|
||||
ssoAuthenticationFailures,
|
||||
ssoAuthentications,
|
||||
wsFedTokenRequests,
|
||||
wsTrustTokenRequests,
|
||||
usernamePasswordAuthenticationFailures,
|
||||
usernamePasswordAuthentications,
|
||||
externalAuthentications,
|
||||
externalAuthNFailures,
|
||||
artifactDBFailures,
|
||||
avgArtifactDBQueryTime,
|
||||
configDBFailures,
|
||||
avgConfigDBQueryTime,
|
||||
federationMetadataRequests,
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = perfdata.NewCollector("AD FS", []string{"*"}, counters)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create AD FS collector: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
c.adLoginConnectionFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
|
||||
"Total number of connection failures to an Active Directory domain controller",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.certificateAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "certificate_authentications_total"),
|
||||
"Total number of User Certificate authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.deviceAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "device_authentications_total"),
|
||||
"Total number of Device authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.extranetAccountLockouts = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "extranet_account_lockouts_total"),
|
||||
"Total number of Extranet Account Lockouts",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.federatedAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "federated_authentications_total"),
|
||||
"Total number of authentications from a federated source",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.passportAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "passport_authentications_total"),
|
||||
"Total number of Microsoft Passport SSO authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.passiveRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "passive_requests_total"),
|
||||
"Total number of passive (browser-based) requests",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.passwordChangeFailed = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "password_change_failed_total"),
|
||||
"Total number of failed password changes",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.passwordChangeSucceeded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "password_change_succeeded_total"),
|
||||
"Total number of successful password changes",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.tokenRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "token_requests_total"),
|
||||
"Total number of token requests",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.windowsIntegratedAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "windows_integrated_authentications_total"),
|
||||
"Total number of Windows integrated authentications (Kerberos/NTLM)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthAuthZRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_authorization_requests_total"),
|
||||
"Total number of incoming requests to the OAuth Authorization endpoint",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_authentication_success_total"),
|
||||
"Total number of successful OAuth client Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientAuthenticationsFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_authentication_failure_total"),
|
||||
"Total number of failed OAuth client Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientCredentialsRequestFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_credentials_failure_total"),
|
||||
"Total number of failed OAuth Client Credentials Requests",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientCredentialsRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_credentials_success_total"),
|
||||
"Total number of successful RP tokens issued for OAuth Client Credentials Requests",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientPrivateKeyJwtAuthenticationFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_privkey_jwt_authentication_failure_total"),
|
||||
"Total number of failed OAuth Client Private Key Jwt Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientPrivateKeyJwtAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_privkey_jwt_authentications_success_total"),
|
||||
"Total number of successful OAuth Client Private Key Jwt Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientSecretBasicAuthenticationFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_secret_basic_authentications_failure_total"),
|
||||
"Total number of failed OAuth Client Secret Basic Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientSecretBasicAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_secret_basic_authentications_success_total"),
|
||||
"Total number of successful OAuth Client Secret Basic Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientSecretPostAuthenticationFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_secret_post_authentications_failure_total"),
|
||||
"Total number of failed OAuth Client Secret Post Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientSecretPostAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_secret_post_authentications_success_total"),
|
||||
"Total number of successful OAuth Client Secret Post Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientWindowsIntegratedAuthenticationFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_windows_authentications_failure_total"),
|
||||
"Total number of failed OAuth Client Windows Integrated Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthClientWindowsIntegratedAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_client_windows_authentications_success_total"),
|
||||
"Total number of successful OAuth Client Windows Integrated Authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthLogonCertificateRequestFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_logon_certificate_requests_failure_total"),
|
||||
"Total number of failed OAuth Logon Certificate Requests",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthLogonCertificateTokenRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_logon_certificate_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued for OAuth Logon Certificate Requests",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthPasswordGrantRequestFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_password_grant_requests_failure_total"),
|
||||
"Total number of failed OAuth Password Grant Requests",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthPasswordGrantRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_password_grant_requests_success_total"),
|
||||
"Total number of successful OAuth Password Grant Requests",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.oAuthTokenRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "oauth_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued over OAuth protocol",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.samlPTokenRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "samlp_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued over SAML-P protocol",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.ssoAuthenticationFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "sso_authentications_failure_total"),
|
||||
"Total number of failed SSO authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.ssoAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "sso_authentications_success_total"),
|
||||
"Total number of successful SSO authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.wsFedTokenRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "wsfed_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued over WS-Fed protocol",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.wsTrustTokenRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "wstrust_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued over WS-Trust protocol",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.upAuthenticationFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "userpassword_authentications_failure_total"),
|
||||
"Total number of failed AD U/P authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.upAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "userpassword_authentications_success_total"),
|
||||
"Total number of successful AD U/P authentications",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.externalAuthenticationFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "external_authentications_failure_total"),
|
||||
"Total number of failed authentications from external MFA providers",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.externalAuthentications = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "external_authentications_success_total"),
|
||||
"Total number of successful authentications from external MFA providers",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.artifactDBFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "db_artifact_failure_total"),
|
||||
"Total number of failures connecting to the artifact database",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.avgArtifactDBQueryTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "db_artifact_query_time_seconds_total"),
|
||||
"Accumulator of time taken for an artifact database query",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.configDBFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "db_config_failure_total"),
|
||||
"Total number of failures connecting to the configuration database",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.avgConfigDBQueryTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "db_config_query_time_seconds_total"),
|
||||
"Accumulator of time taken for a configuration database query",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.federationMetadataRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "federation_metadata_requests_total"),
|
||||
"Total number of Federation Metadata requests",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
if utils.PDHEnabled() {
|
||||
return c.collectPDH(ch)
|
||||
}
|
||||
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
return c.collect(ctx, logger, ch)
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
var adfsData []perflibADFS
|
||||
|
||||
err := perflib.UnmarshalObject(ctx.PerfObjects["AD FS"], &adfsData, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.adLoginConnectionFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].AdLoginConnectionFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.certificateAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].CertificateAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.deviceAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].DeviceAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.extranetAccountLockouts,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ExtranetAccountLockouts,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.federatedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].FederatedAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passportAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PassportAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passiveRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PassiveRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeFailed,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PasswordChangeFailed,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeSucceeded,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PasswordChangeSucceeded,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.tokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].TokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.windowsIntegratedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].WindowsIntegratedAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthAuthZRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthAuthZRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientAuthenticationsFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientAuthenticationFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientCredentialsRequestFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientCredentialRequestFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientCredentialsRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientCredentialRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientPrivateKeyJwtAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientPrivKeyJWTAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientPrivateKeyJwtAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientPrivKeyJWTAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretBasicAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientBasicAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretBasicAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientBasicAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretPostAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientSecretPostAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretPostAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientSecretPostAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientWindowsIntegratedAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientWindowsAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientWindowsIntegratedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientWindowsAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthLogonCertificateRequestFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthLogonCertRequestFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthLogonCertificateTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthLogonCertTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthPasswordGrantRequestFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthPasswordGrantRequestFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthPasswordGrantRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthPasswordGrantRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.samlPTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].SAMLPTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ssoAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].SSOAuthenticationFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ssoAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].SSOAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.wsFedTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].WSFedTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.wsTrustTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].WSTrustTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.upAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].UsernamePasswordAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.upAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].UsernamePasswordAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.externalAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ExternalAuthNFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.externalAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ExternalAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.artifactDBFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ArtifactDBFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.avgArtifactDBQueryTime,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].AvgArtifactDBQueryTime*math.Pow(10, -8),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.configDBFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ConfigDBFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.avgConfigDBQueryTime,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].AvgConfigDBQueryTime*math.Pow(10, -8),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.federationMetadataRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].FederationMetadataRequests,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
	data, err := c.perfDataCollector.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect ADFS metrics: %w", err)
	}

	instanceKey := slices.Collect(maps.Keys(data))

	if len(instanceKey) == 0 {
		return errors.New("perfdata query for ADFS returned empty result set")
	}

	adfsData, ok := data[instanceKey[0]]
	if !ok {
		return errors.New("perfdata query for ADFS returned empty result set")
	}

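	// Counter values are keyed by the counter-name constants defined in this package; FirstValue holds the raw sample.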
ch <- prometheus.MustNewConstMetric(
|
||||
c.adLoginConnectionFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[adLoginConnectionFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.certificateAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[certificateAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.deviceAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[deviceAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.extranetAccountLockouts,
|
||||
prometheus.CounterValue,
|
||||
adfsData[extranetAccountLockouts].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.federatedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[federatedAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passportAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[passportAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passiveRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[passiveRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeFailed,
|
||||
prometheus.CounterValue,
|
||||
adfsData[passwordChangeFailed].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeSucceeded,
|
||||
prometheus.CounterValue,
|
||||
adfsData[passwordChangeSucceeded].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.tokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[tokenRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.windowsIntegratedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[windowsIntegratedAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthAuthZRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthAuthZRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientAuthenticationsFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientAuthenticationFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientCredentialsRequestFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientCredentialRequestFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientCredentialsRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientCredentialRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientPrivateKeyJwtAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientPrivateKeyJWTAuthenticationFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientPrivateKeyJwtAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientPrivateKeyJWTAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretBasicAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientBasicAuthenticationFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretBasicAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientBasicAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretPostAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientSecretPostAuthenticationFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretPostAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientSecretPostAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientWindowsIntegratedAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientWindowsAuthenticationFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientWindowsIntegratedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthClientWindowsAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthLogonCertificateRequestFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthLogonCertRequestFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthLogonCertificateTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthLogonCertTokenRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthPasswordGrantRequestFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthPasswordGrantRequestFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthPasswordGrantRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthPasswordGrantRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[oAuthTokenRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.samlPTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[samlPTokenRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ssoAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[ssoAuthenticationFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ssoAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[ssoAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.wsFedTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[wsFedTokenRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.wsTrustTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[wsTrustTokenRequests].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.upAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[usernamePasswordAuthenticationFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.upAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[usernamePasswordAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.externalAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[externalAuthNFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.externalAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[externalAuthentications].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.artifactDBFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[artifactDBFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.avgArtifactDBQueryTime,
|
||||
prometheus.CounterValue,
|
||||
adfsData[avgArtifactDBQueryTime].FirstValue*math.Pow(10, -8),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.configDBFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[configDBFailures].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.avgConfigDBQueryTime,
|
||||
prometheus.CounterValue,
|
||||
adfsData[avgConfigDBQueryTime].FirstValue*math.Pow(10, -8),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.federationMetadataRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[federationMetadataRequests].FirstValue,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
package adfs_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/adfs"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, adfs.Name, adfs.NewWithFlags)
}
@@ -1,93 +0,0 @@
package adfs

const (
	adLoginConnectionFailures = "AD Login Connection Failures"
	artifactDBFailures = "Artifact Database Connection Failures"
	avgArtifactDBQueryTime = "Average Artifact Database Query Time"
	avgConfigDBQueryTime = "Average Config Database Query Time"
	certificateAuthentications = "Certificate Authentications"
	configDBFailures = "Configuration Database Connection Failures"
	deviceAuthentications = "Device Authentications"
	externalAuthentications = "External Authentications"
	externalAuthNFailures = "External Authentication Failures"
	extranetAccountLockouts = "Extranet Account Lockouts"
	federatedAuthentications = "Federated Authentications"
	federationMetadataRequests = "Federation Metadata Requests"
	oAuthAuthZRequests = "OAuth AuthZ Requests"
	oAuthClientAuthenticationFailures = "OAuth Client Authentications Failures"
	oAuthClientAuthentications = "OAuth Client Authentications"
	oAuthClientBasicAuthenticationFailures = "OAuth Client Secret Basic Authentication Failures"
	oAuthClientBasicAuthentications = "OAuth Client Secret Basic Authentication Requests"
	oAuthClientCredentialRequestFailures = "OAuth Client Credentials Request Failures"
	oAuthClientCredentialRequests = "OAuth Client Credentials Requests"
	oAuthClientPrivateKeyJWTAuthenticationFailures = "OAuth Client Private Key Jwt Authentication Failures"
	oAuthClientPrivateKeyJWTAuthentications = "OAuth Client Private Key Jwt Authentications"
	oAuthClientSecretPostAuthenticationFailures = "OAuth Client Secret Post Authentication Failures"
	oAuthClientSecretPostAuthentications = "OAuth Client Secret Post Authentications"
	oAuthClientWindowsAuthenticationFailures = "OAuth Client Windows Integrated Authentication Failures"
	oAuthClientWindowsAuthentications = "OAuth Client Windows Integrated Authentications"
	oAuthLogonCertRequestFailures = "OAuth Logon Certificate Request Failures"
	oAuthLogonCertTokenRequests = "OAuth Logon Certificate Token Requests"
	oAuthPasswordGrantRequestFailures = "OAuth Password Grant Request Failures"
	oAuthPasswordGrantRequests = "OAuth Password Grant Requests"
	oAuthTokenRequests = "OAuth Token Requests"
	passiveRequests = "Passive Requests"
	passportAuthentications = "Microsoft Passport Authentications"
	passwordChangeFailed = "Password Change Failed Requests"
	passwordChangeSucceeded = "Password Change Successful Requests"
	samlPTokenRequests = "SAML-P Token Requests"
	ssoAuthenticationFailures = "SSO Authentication Failures"
	ssoAuthentications = "SSO Authentications"
	tokenRequests = "Token Requests"
	usernamePasswordAuthenticationFailures = "U/P Authentication Failures"
	usernamePasswordAuthentications = "U/P Authentications"
	windowsIntegratedAuthentications = "Windows Integrated Authentications"
	wsFedTokenRequests = "WS-Fed Token Requests"
	wsTrustTokenRequests = "WS-Trust Token Requests"
)

type perflibADFS struct {
	AdLoginConnectionFailures float64 `perflib:"AD Login Connection Failures"`
	CertificateAuthentications float64 `perflib:"Certificate Authentications"`
	DeviceAuthentications float64 `perflib:"Device Authentications"`
	ExtranetAccountLockouts float64 `perflib:"Extranet Account Lockouts"`
	FederatedAuthentications float64 `perflib:"Federated Authentications"`
	PassportAuthentications float64 `perflib:"Microsoft Passport Authentications"`
	PassiveRequests float64 `perflib:"Passive Requests"`
	PasswordChangeFailed float64 `perflib:"Password Change Failed Requests"`
	PasswordChangeSucceeded float64 `perflib:"Password Change Successful Requests"`
	TokenRequests float64 `perflib:"Token Requests"`
	WindowsIntegratedAuthentications float64 `perflib:"Windows Integrated Authentications"`
	OAuthAuthZRequests float64 `perflib:"OAuth AuthZ Requests"`
	OAuthClientAuthentications float64 `perflib:"OAuth Client Authentications"`
	OAuthClientAuthenticationFailures float64 `perflib:"OAuth Client Authentications Failures"`
	OAuthClientCredentialRequestFailures float64 `perflib:"OAuth Client Credentials Request Failures"`
	OAuthClientCredentialRequests float64 `perflib:"OAuth Client Credentials Requests"`
	OAuthClientPrivKeyJWTAuthnFailures float64 `perflib:"OAuth Client Private Key Jwt Authentication Failures"`
	OAuthClientPrivKeyJWTAuthentications float64 `perflib:"OAuth Client Private Key Jwt Authentications"`
	OAuthClientBasicAuthnFailures float64 `perflib:"OAuth Client Secret Basic Authentication Failures"`
	OAuthClientBasicAuthentications float64 `perflib:"OAuth Client Secret Basic Authentication Requests"`
	OAuthClientSecretPostAuthnFailures float64 `perflib:"OAuth Client Secret Post Authentication Failures"`
	OAuthClientSecretPostAuthentications float64 `perflib:"OAuth Client Secret Post Authentications"`
	OAuthClientWindowsAuthnFailures float64 `perflib:"OAuth Client Windows Integrated Authentication Failures"`
	OAuthClientWindowsAuthentications float64 `perflib:"OAuth Client Windows Integrated Authentications"`
	OAuthLogonCertRequestFailures float64 `perflib:"OAuth Logon Certificate Request Failures"`
	OAuthLogonCertTokenRequests float64 `perflib:"OAuth Logon Certificate Token Requests"`
	OAuthPasswordGrantRequestFailures float64 `perflib:"OAuth Password Grant Request Failures"`
	OAuthPasswordGrantRequests float64 `perflib:"OAuth Password Grant Requests"`
	OAuthTokenRequests float64 `perflib:"OAuth Token Requests"`
	SAMLPTokenRequests float64 `perflib:"SAML-P Token Requests"`
	SSOAuthenticationFailures float64 `perflib:"SSO Authentication Failures"`
	SSOAuthentications float64 `perflib:"SSO Authentications"`
	WSFedTokenRequests float64 `perflib:"WS-Fed Token Requests"`
	WSTrustTokenRequests float64 `perflib:"WS-Trust Token Requests"`
	UsernamePasswordAuthnFailures float64 `perflib:"U/P Authentication Failures"`
	UsernamePasswordAuthentications float64 `perflib:"U/P Authentications"`
	ExternalAuthentications float64 `perflib:"External Authentications"`
	ExternalAuthNFailures float64 `perflib:"External Authentication Failures"`
	ArtifactDBFailures float64 `perflib:"Artifact Database Connection Failures"`
	AvgArtifactDBQueryTime float64 `perflib:"Average Artifact Database Query Time"`
	ConfigDBFailures float64 `perflib:"Configuration Database Connection Failures"`
	AvgConfigDBQueryTime float64 `perflib:"Average Config Database Query Time"`
	FederationMetadataRequests float64 `perflib:"Federation Metadata Requests"`
}
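The perflib struct tags above drive reflection-based decoding of the "AD FS" counter object. A condensed sketch of how the deleted collector consumed this struct (the UnmarshalObject call and field reads are taken from the collect function earlier in this diff; only the surrounding framing is abbreviated):

	var adfsData []perflibADFS
	if err := perflib.UnmarshalObject(ctx.PerfObjects["AD FS"], &adfsData, logger); err != nil {
		return err
	}
	// The object is single-instance, so all counters are read from the first element.
	ch <- prometheus.MustNewConstMetric(c.tokenRequests, prometheus.CounterValue, adfsData[0].TokenRequests)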
707
pkg/collector/cache/cache.go
vendored
@@ -1,707 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/utils"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "cache"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for Perflib Cache metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
perfDataCollector *perfdata.Collector
|
||||
|
||||
asyncCopyReadsTotal *prometheus.Desc
|
||||
asyncDataMapsTotal *prometheus.Desc
|
||||
asyncFastReadsTotal *prometheus.Desc
|
||||
asyncMDLReadsTotal *prometheus.Desc
|
||||
asyncPinReadsTotal *prometheus.Desc
|
||||
copyReadHitsTotal *prometheus.Desc
|
||||
copyReadsTotal *prometheus.Desc
|
||||
dataFlushesTotal *prometheus.Desc
|
||||
dataFlushPagesTotal *prometheus.Desc
|
||||
dataMapHitsPercent *prometheus.Desc
|
||||
dataMapPinsTotal *prometheus.Desc
|
||||
dataMapsTotal *prometheus.Desc
|
||||
dirtyPages *prometheus.Desc
|
||||
dirtyPageThreshold *prometheus.Desc
|
||||
fastReadNotPossiblesTotal *prometheus.Desc
|
||||
fastReadResourceMissesTotal *prometheus.Desc
|
||||
fastReadsTotal *prometheus.Desc
|
||||
lazyWriteFlushesTotal *prometheus.Desc
|
||||
lazyWritePagesTotal *prometheus.Desc
|
||||
mdlReadHitsTotal *prometheus.Desc
|
||||
mdlReadsTotal *prometheus.Desc
|
||||
pinReadHitsTotal *prometheus.Desc
|
||||
pinReadsTotal *prometheus.Desc
|
||||
readAheadsTotal *prometheus.Desc
|
||||
syncCopyReadsTotal *prometheus.Desc
|
||||
syncDataMapsTotal *prometheus.Desc
|
||||
syncFastReadsTotal *prometheus.Desc
|
||||
syncMDLReadsTotal *prometheus.Desc
|
||||
syncPinReadsTotal *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
if utils.PDHEnabled() {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return []string{"Cache"}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
if utils.PDHEnabled() {
|
||||
counters := []string{
|
||||
asyncCopyReadsTotal,
|
||||
asyncDataMapsTotal,
|
||||
asyncFastReadsTotal,
|
||||
asyncMDLReadsTotal,
|
||||
asyncPinReadsTotal,
|
||||
copyReadHitsTotal,
|
||||
copyReadsTotal,
|
||||
dataFlushesTotal,
|
||||
dataFlushPagesTotal,
|
||||
dataMapHitsPercent,
|
||||
dataMapPinsTotal,
|
||||
dataMapsTotal,
|
||||
dirtyPages,
|
||||
dirtyPageThreshold,
|
||||
fastReadNotPossiblesTotal,
|
||||
fastReadResourceMissesTotal,
|
||||
fastReadsTotal,
|
||||
lazyWriteFlushesTotal,
|
||||
lazyWritePagesTotal,
|
||||
mdlReadHitsTotal,
|
||||
mdlReadsTotal,
|
||||
pinReadHitsTotal,
|
||||
pinReadsTotal,
|
||||
readAheadsTotal,
|
||||
syncCopyReadsTotal,
|
||||
syncDataMapsTotal,
|
||||
syncFastReadsTotal,
|
||||
syncMDLReadsTotal,
|
||||
syncPinReadsTotal,
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = perfdata.NewCollector("Cache", []string{"*"}, counters)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Cache collector: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
c.asyncCopyReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
|
||||
"(AsyncCopyReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.asyncDataMapsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "async_data_maps_total"),
|
||||
"(AsyncDataMapsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.asyncFastReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "async_fast_reads_total"),
|
||||
"(AsyncFastReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.asyncMDLReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "async_mdl_reads_total"),
|
||||
"(AsyncMDLReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.asyncPinReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "async_pin_reads_total"),
|
||||
"(AsyncPinReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.copyReadHitsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "copy_read_hits_total"),
|
||||
"(CopyReadHitsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.copyReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "copy_reads_total"),
|
||||
"(CopyReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.dataFlushesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "data_flushes_total"),
|
||||
"(DataFlushesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.dataFlushPagesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "data_flush_pages_total"),
|
||||
"(DataFlushPagesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.dataMapHitsPercent = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "data_map_hits_percent"),
|
||||
"(DataMapHitsPercent)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.dataMapPinsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "data_map_pins_total"),
|
||||
"(DataMapPinsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.dataMapsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "data_maps_total"),
|
||||
"(DataMapsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.dirtyPages = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "dirty_pages"),
|
||||
"(DirtyPages)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.dirtyPageThreshold = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "dirty_page_threshold"),
|
||||
"(DirtyPageThreshold)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.fastReadNotPossiblesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "fast_read_not_possibles_total"),
|
||||
"(FastReadNotPossiblesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.fastReadResourceMissesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "fast_read_resource_misses_total"),
|
||||
"(FastReadResourceMissesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.fastReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "fast_reads_total"),
|
||||
"(FastReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.lazyWriteFlushesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "lazy_write_flushes_total"),
|
||||
"(LazyWriteFlushesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.lazyWritePagesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "lazy_write_pages_total"),
|
||||
"(LazyWritePagesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.mdlReadHitsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "mdl_read_hits_total"),
|
||||
"(MDLReadHitsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.mdlReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "mdl_reads_total"),
|
||||
"(MDLReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.pinReadHitsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "pin_read_hits_total"),
|
||||
"(PinReadHitsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.pinReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "pin_reads_total"),
|
||||
"(PinReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.readAheadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "read_aheads_total"),
|
||||
"(ReadAheadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.syncCopyReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "sync_copy_reads_total"),
|
||||
"(SyncCopyReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.syncDataMapsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "sync_data_maps_total"),
|
||||
"(SyncDataMapsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.syncFastReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "sync_fast_reads_total"),
|
||||
"(SyncFastReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.syncMDLReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "sync_mdl_reads_total"),
|
||||
"(SyncMDLReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.syncPinReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "sync_pin_reads_total"),
|
||||
"(SyncPinReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect implements the Collector interface.
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
if utils.PDHEnabled() {
|
||||
return c.collectPDH(ch)
|
||||
}
|
||||
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ctx, logger, ch); err != nil {
|
||||
logger.Error("failed collecting cache metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
var dst []perflibCache // Single-instance class, array is required but will have single entry.
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["Cache"], &dst, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(dst) != 1 {
|
||||
return errors.New("expected single instance of Cache")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncCopyReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncCopyReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncDataMapsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncDataMapsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncFastReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncFastReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncMDLReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncMDLReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncPinReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncPinReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.copyReadHitsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CopyReadHitsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.copyReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].CopyReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DataFlushesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataFlushPagesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DataFlushPagesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataMapHitsPercent,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].DataMapHitsPercent,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataMapPinsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DataMapPinsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataMapsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DataMapsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dirtyPages,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].DirtyPages,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dirtyPageThreshold,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].DirtyPageThreshold,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.fastReadNotPossiblesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].FastReadNotPossiblesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.fastReadResourceMissesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].FastReadResourceMissesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.fastReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].FastReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.lazyWriteFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].LazyWriteFlushesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.lazyWritePagesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].LazyWritePagesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.mdlReadHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].MDLReadHitsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.mdlReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].MDLReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.pinReadHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PinReadHitsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.pinReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PinReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.readAheadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].ReadAheadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncCopyReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncCopyReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncDataMapsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncDataMapsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncFastReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncFastReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncMDLReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncMDLReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncPinReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncPinReadsTotal,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
|
||||
data, err := c.perfDataCollector.Collect()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect Cache metrics: %w", err)
|
||||
}
|
||||
|
||||
cacheData, ok := data["*"]
|
||||
|
||||
if !ok {
|
||||
return errors.New("perflib query for Cache returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncCopyReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[asyncCopyReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncDataMapsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[asyncDataMapsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncFastReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[asyncFastReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncMDLReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[asyncMDLReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.asyncPinReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[asyncPinReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.copyReadHitsTotal,
|
||||
prometheus.GaugeValue,
|
||||
cacheData[copyReadHitsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.copyReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[copyReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[dataFlushesTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataFlushPagesTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[dataFlushPagesTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataMapHitsPercent,
|
||||
prometheus.GaugeValue,
|
||||
cacheData[dataMapHitsPercent].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataMapPinsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[dataMapPinsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dataMapsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[dataMapsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dirtyPages,
|
||||
prometheus.GaugeValue,
|
||||
cacheData[dirtyPages].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dirtyPageThreshold,
|
||||
prometheus.GaugeValue,
|
||||
cacheData[dirtyPageThreshold].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.fastReadNotPossiblesTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[fastReadNotPossiblesTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.fastReadResourceMissesTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[fastReadResourceMissesTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.fastReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[fastReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.lazyWriteFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[lazyWriteFlushesTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.lazyWritePagesTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[lazyWritePagesTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.mdlReadHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[mdlReadHitsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.mdlReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[mdlReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.pinReadHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[pinReadHitsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.pinReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[pinReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.readAheadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[readAheadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncCopyReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[syncCopyReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncDataMapsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[syncDataMapsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncFastReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[syncFastReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncMDLReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[syncMDLReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncPinReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
cacheData[syncPinReadsTotal].FirstValue,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
12
pkg/collector/cache/cache_test.go
vendored
@@ -1,12 +0,0 @@
package cache_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/cache"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, cache.Name, cache.NewWithFlags)
}
69
pkg/collector/cache/const.go
vendored
@@ -1,69 +0,0 @@
package cache

// Perflib "Cache":
// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
const (
	asyncCopyReadsTotal = "Async Copy Reads/sec"
	asyncDataMapsTotal = "Async Data Maps/sec"
	asyncFastReadsTotal = "Async Fast Reads/sec"
	asyncMDLReadsTotal = "Async MDL Reads/sec"
	asyncPinReadsTotal = "Async Pin Reads/sec"
	copyReadHitsTotal = "Copy Read Hits %"
	copyReadsTotal = "Copy Reads/sec"
	dataFlushesTotal = "Data Flushes/sec"
	dataFlushPagesTotal = "Data Flush Pages/sec"
	dataMapHitsPercent = "Data Map Hits %"
	dataMapPinsTotal = "Data Map Pins/sec"
	dataMapsTotal = "Data Maps/sec"
	dirtyPages = "Dirty Pages"
	dirtyPageThreshold = "Dirty Page Threshold"
	fastReadNotPossiblesTotal = "Fast Read Not Possibles/sec"
	fastReadResourceMissesTotal = "Fast Read Resource Misses/sec"
	fastReadsTotal = "Fast Reads/sec"
	lazyWriteFlushesTotal = "Lazy Write Flushes/sec"
	lazyWritePagesTotal = "Lazy Write Pages/sec"
	mdlReadHitsTotal = "MDL Read Hits %"
	mdlReadsTotal = "MDL Reads/sec"
	pinReadHitsTotal = "Pin Read Hits %"
	pinReadsTotal = "Pin Reads/sec"
	readAheadsTotal = "Read Aheads/sec"
	syncCopyReadsTotal = "Sync Copy Reads/sec"
	syncDataMapsTotal = "Sync Data Maps/sec"
	syncFastReadsTotal = "Sync Fast Reads/sec"
	syncMDLReadsTotal = "Sync MDL Reads/sec"
	syncPinReadsTotal = "Sync Pin Reads/sec"
)

// Perflib "Cache":
// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
type perflibCache struct {
	AsyncCopyReadsTotal float64 `perflib:"Async Copy Reads/sec"`
	AsyncDataMapsTotal float64 `perflib:"Async Data Maps/sec"`
	AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"`
	AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"`
	AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"`
	CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"`
	CopyReadsTotal float64 `perflib:"Copy Reads/sec"`
	DataFlushesTotal float64 `perflib:"Data Flushes/sec"`
	DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"`
	DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
	DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"`
	DataMapsTotal float64 `perflib:"Data Maps/sec"`
	DirtyPages float64 `perflib:"Dirty Pages"`
	DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
	FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"`
	FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"`
	FastReadsTotal float64 `perflib:"Fast Reads/sec"`
	LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"`
	LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"`
	MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"`
	MDLReadsTotal float64 `perflib:"MDL Reads/sec"`
	PinReadHitsTotal float64 `perflib:"Pin Read Hits %"`
	PinReadsTotal float64 `perflib:"Pin Reads/sec"`
	ReadAheadsTotal float64 `perflib:"Read Aheads/sec"`
	SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"`
	SyncDataMapsTotal float64 `perflib:"Sync Data Maps/sec"`
	SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"`
	SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"`
	SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"`
}
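On the PDH path, these constant strings double as keys into the map returned by perfdata.Collector.Collect. A condensed sketch of that lookup, mirroring the deleted cache.go above (the ok-check on the instance map is omitted here for brevity):

	data, err := c.perfDataCollector.Collect()
	if err != nil {
		return fmt.Errorf("failed to collect Cache metrics: %w", err)
	}
	cacheData := data["*"]
	// FirstValue carries the raw counter sample for each named counter.
	ch <- prometheus.MustNewConstMetric(c.copyReadsTotal, prometheus.CounterValue, cacheData[copyReadsTotal].FirstValue)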
@@ -10,56 +10,56 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/ad"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/adcs"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/adfs"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cache"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/container"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cpu"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cpu_info"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cs"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/dfsr"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/dhcp"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/diskdrive"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/dns"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/exchange"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/filetime"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/fsrmquota"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/iis"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/logon"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/memory"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/msmq"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/mssql"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/net"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/netframework"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/nps"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/os"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/physical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/printer"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/process"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/remote_fx"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/scheduled_task"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/service"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/smb"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/smbclient"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/smtp"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/system"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/tcp"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/teradici_pcoip"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/terminal_services"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/textfile"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/thermalzone"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/time"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/updates"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/vmware"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/vmware_blast"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/ad"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/adcs"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/adfs"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cache"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/container"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cpu"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cpu_info"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cs"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/dfsr"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/dhcp"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/diskdrive"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/dns"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/exchange"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/filetime"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/fsrmquota"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/hyperv"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/iis"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/license"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/logical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/logon"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/memory"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/mscluster"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/msmq"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/mssql"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/net"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/netframework"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/nps"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/os"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/physical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/printer"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/process"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/remote_fx"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/scheduled_task"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/service"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/smb"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/smbclient"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/smtp"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/system"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/tcp"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/teradici_pcoip"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/terminal_services"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/textfile"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/thermalzone"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/time"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/updates"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/vmware"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/vmware_blast"
|
||||
"github.com/prometheus-community/windows_exporter/internal/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/internal/types"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
@@ -74,7 +74,7 @@ func NewWithFlags(app *kingpin.Application) *MetricCollectors {
	return New(collectors)
}

// NewWithConfig To be called by the external libraries for collector initialization without running kingpin.Parse
// NewWithConfig To be called by the external libraries for collector initialization without running [kingpin.Parse].
//
//goland:noinspection GoUnusedExportedFunction
func NewWithConfig(config Config) *MetricCollectors {
@@ -94,19 +94,19 @@ func NewWithConfig(config Config) *MetricCollectors {
	collectors[exchange.Name] = exchange.New(&config.Exchange)
	collectors[filetime.Name] = filetime.New(&config.Filetime)
	collectors[fsrmquota.Name] = fsrmquota.New(&config.Fsrmquota)
	collectors[hyperv.Name] = hyperv.New(&config.Hyperv)
	collectors[hyperv.Name] = hyperv.New(&config.HyperV)
	collectors[iis.Name] = iis.New(&config.IIS)
	collectors[license.Name] = license.New(&config.License)
	collectors[logical_disk.Name] = logical_disk.New(&config.LogicalDisk)
	collectors[logon.Name] = logon.New(&config.Logon)
	collectors[memory.Name] = memory.New(&config.Memory)
	collectors[mscluster.Name] = mscluster.New(&config.Mscluster)
	collectors[mscluster.Name] = mscluster.New(&config.MSCluster)
	collectors[msmq.Name] = msmq.New(&config.Msmq)
	collectors[mssql.Name] = mssql.New(&config.Mssql)
	collectors[net.Name] = net.New(&config.Net)
	collectors[netframework.Name] = netframework.New(&config.NetFramework)
	collectors[nps.Name] = nps.New(&config.Nps)
	collectors[os.Name] = os.New(&config.Os)
	collectors[os.Name] = os.New(&config.OS)
	collectors[perfdata.Name] = perfdata.New(&config.PerfData)
	collectors[physical_disk.Name] = physical_disk.New(&config.PhysicalDisk)
	collectors[printer.Name] = printer.New(&config.Printer)
@@ -122,7 +122,7 @@ func NewWithConfig(config Config) *MetricCollectors {
	collectors[tcp.Name] = tcp.New(&config.TCP)
	collectors[terminal_services.Name] = terminal_services.New(&config.TerminalServices)
	collectors[textfile.Name] = textfile.New(&config.Textfile)
	collectors[thermalzone.Name] = thermalzone.New(&config.Thermalzone)
	collectors[thermalzone.Name] = thermalzone.New(&config.ThermalZone)
	collectors[time.Name] = time.New(&config.Time)
	collectors[updates.Name] = updates.New(&config.Updates)
	collectors[vmware.Name] = vmware.New(&config.Vmware)
@@ -199,16 +199,17 @@ func (c *MetricCollectors) Build(logger *slog.Logger) error {

// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape.
func (c *MetricCollectors) PrepareScrapeContext() (*types.ScrapeContext, error) {
	if c.PerfCounterQuery == "" { // if perfCounterQuery is empty, no perf counters are needed.
	// If no perf counters to query, return an empty context.
	if c.PerfCounterQuery == "" {
		return &types.ScrapeContext{}, nil
	}

	objs, err := perflib.GetPerflibSnapshot(c.PerfCounterQuery)
	perfObjects, err := perflib.GetPerflibSnapshot(c.PerfCounterQuery)
	if err != nil {
		return nil, err
	}

	return &types.ScrapeContext{PerfObjects: objs}, nil
	return &types.ScrapeContext{PerfObjects: perfObjects}, nil
}

// Close To be called by the exporter for collector cleanup.

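For orientation, a rough sketch of how a caller drives one scrape with this API; the collector and logger variable names here are illustrative and not taken from the diff:

	ctx, err := collectors.PrepareScrapeContext()
	if err != nil {
		return err
	}
	if err := cacheCollector.Collect(ctx, logger, ch); err != nil {
		logger.Error("failed collecting cache metrics", slog.Any("err", err))
	}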
@@ -1,54 +1,54 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/ad"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/adcs"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/adfs"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cache"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/container"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cpu"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cpu_info"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cs"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/dfsr"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/dhcp"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/diskdrive"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/dns"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/exchange"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/filetime"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/fsrmquota"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/iis"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/logon"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/memory"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/msmq"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/mssql"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/net"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/netframework"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/nps"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/os"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/physical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/printer"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/process"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/remote_fx"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/scheduled_task"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/service"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/smb"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/smbclient"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/smtp"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/system"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/tcp"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/teradici_pcoip"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/terminal_services"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/textfile"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/thermalzone"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/time"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/updates"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/vmware"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/vmware_blast"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/ad"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/adcs"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/adfs"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cache"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/container"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cpu"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cpu_info"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cs"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/dfsr"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/dhcp"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/diskdrive"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/dns"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/exchange"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/filetime"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/fsrmquota"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/hyperv"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/iis"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/license"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/logical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/logon"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/memory"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/mscluster"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/msmq"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/mssql"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/net"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/netframework"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/nps"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/os"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/physical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/printer"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/process"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/remote_fx"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/scheduled_task"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/service"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/smb"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/smbclient"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/smtp"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/system"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/tcp"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/teradici_pcoip"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/terminal_services"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/textfile"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/thermalzone"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/time"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/updates"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/vmware"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/vmware_blast"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
@@ -62,24 +62,24 @@ type Config struct {
|
||||
Cs cs.Config `yaml:"cs"`
|
||||
DFSR dfsr.Config `yaml:"dfsr"`
|
||||
Dhcp dhcp.Config `yaml:"dhcp"`
|
||||
DiskDrive diskdrive.Config `yaml:"diskdrive"` //nolint:tagliatelle
|
||||
DiskDrive diskdrive.Config `yaml:"disk_drive"`
|
||||
DNS dns.Config `yaml:"dns"`
|
||||
Exchange exchange.Config `yaml:"exchange"`
|
||||
Filetime filetime.Config `yaml:"filetime"`
|
||||
Fsrmquota fsrmquota.Config `yaml:"fsrmquota"`
|
||||
Hyperv hyperv.Config `yaml:"hyperv"`
|
||||
HyperV hyperv.Config `yaml:"hyper_v"`
|
||||
IIS iis.Config `yaml:"iis"`
|
||||
License license.Config `yaml:"license"`
|
||||
LogicalDisk logical_disk.Config `yaml:"logical_disk"`
|
||||
Logon logon.Config `yaml:"logon"`
|
||||
Memory memory.Config `yaml:"memory"`
|
||||
Mscluster mscluster.Config `yaml:"mscluster"`
|
||||
MSCluster mscluster.Config `yaml:"ms_cluster"`
|
||||
Msmq msmq.Config `yaml:"msmq"`
|
||||
Mssql mssql.Config `yaml:"mssql"`
|
||||
Net net.Config `yaml:"net"`
|
||||
NetFramework netframework.Config `yaml:"net_framework"`
|
||||
Nps nps.Config `yaml:"nps"`
|
||||
Os os.Config `yaml:"os"`
|
||||
OS os.Config `yaml:"os"`
|
||||
PerfData perfdata.Config `yaml:"perf_data"`
|
||||
PhysicalDisk physical_disk.Config `yaml:"physical_disk"`
|
||||
Printer printer.Config `yaml:"printer"`
|
||||
@@ -88,14 +88,14 @@ type Config struct {
|
||||
ScheduledTask scheduled_task.Config `yaml:"scheduled_task"`
|
||||
Service service.Config `yaml:"service"`
|
||||
SMB smb.Config `yaml:"smb"`
|
||||
SMBClient smbclient.Config `yaml:"smbclient"` //nolint:tagliatelle
|
||||
SMBClient smbclient.Config `yaml:"smb_client"`
|
||||
SMTP smtp.Config `yaml:"smtp"`
|
||||
System system.Config `yaml:"system"`
|
||||
TeradiciPcoip teradici_pcoip.Config `yaml:"teradici_pcoip"`
|
||||
TCP tcp.Config `yaml:"tcp"`
|
||||
TerminalServices terminal_services.Config `yaml:"terminal_services"`
|
||||
Textfile textfile.Config `yaml:"textfile"`
|
||||
Thermalzone thermalzone.Config `yaml:"thermalzone"`
|
||||
ThermalZone thermalzone.Config `yaml:"thermal_zone"`
|
||||
Time time.Config `yaml:"time"`
|
||||
Updates updates.Config `yaml:"updates"`
|
||||
Vmware vmware.Config `yaml:"vmware"`
|
||||
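The renamed YAML tags in the hunks above change how several collector sections are spelled in a configuration file. A small, hypothetical sketch of unmarshalling a fragment with the new keys into this struct; gopkg.in/yaml.v3 and the import path are assumptions, and only keys shown in the hunks are used.

package main

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/pkg/collector" // assumed import path
	"gopkg.in/yaml.v3"                                               // assumed YAML library
)

func main() {
	// disk_drive, hyper_v, ms_cluster, smb_client and thermal_zone replace the old
	// single-word keys; unchanged keys such as "iis" keep working as before.
	fragment := []byte(`
disk_drive: {}
hyper_v: {}
ms_cluster: {}
smb_client: {}
thermal_zone: {}
iis: {}
`)

	cfg := collector.ConfigDefaults
	if err := yaml.Unmarshal(fragment, &cfg); err != nil {
		fmt.Println("unmarshal failed:", err)
	}
}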
@@ -121,19 +121,19 @@ var ConfigDefaults = Config{
|
||||
Exchange: exchange.ConfigDefaults,
|
||||
Filetime: filetime.ConfigDefaults,
|
||||
Fsrmquota: fsrmquota.ConfigDefaults,
|
||||
Hyperv: hyperv.ConfigDefaults,
|
||||
HyperV: hyperv.ConfigDefaults,
|
||||
IIS: iis.ConfigDefaults,
|
||||
License: license.ConfigDefaults,
|
||||
LogicalDisk: logical_disk.ConfigDefaults,
|
||||
Logon: logon.ConfigDefaults,
|
||||
Memory: memory.ConfigDefaults,
|
||||
Mscluster: mscluster.ConfigDefaults,
|
||||
MSCluster: mscluster.ConfigDefaults,
|
||||
Msmq: msmq.ConfigDefaults,
|
||||
Mssql: mssql.ConfigDefaults,
|
||||
Net: net.ConfigDefaults,
|
||||
NetFramework: netframework.ConfigDefaults,
|
||||
Nps: nps.ConfigDefaults,
|
||||
Os: os.ConfigDefaults,
|
||||
OS: os.ConfigDefaults,
|
||||
PerfData: perfdata.ConfigDefaults,
|
||||
PhysicalDisk: physical_disk.ConfigDefaults,
|
||||
Printer: printer.ConfigDefaults,
|
||||
@@ -149,7 +149,7 @@ var ConfigDefaults = Config{
|
||||
TCP: tcp.ConfigDefaults,
|
||||
TerminalServices: terminal_services.ConfigDefaults,
|
||||
Textfile: textfile.ConfigDefaults,
|
||||
Thermalzone: thermalzone.ConfigDefaults,
|
||||
ThermalZone: thermalzone.ConfigDefaults,
|
||||
Time: time.ConfigDefaults,
|
||||
Updates: updates.ConfigDefaults,
|
||||
Vmware: vmware.ConfigDefaults,
|
||||
|
||||
@@ -1,461 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "container"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for containers metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
// Presence
|
||||
containerAvailable *prometheus.Desc
|
||||
|
||||
// Number of containers
|
||||
containersCount *prometheus.Desc
|
||||
|
||||
// Memory
|
||||
usageCommitBytes *prometheus.Desc
|
||||
usageCommitPeakBytes *prometheus.Desc
|
||||
usagePrivateWorkingSetBytes *prometheus.Desc
|
||||
|
||||
// CPU
|
||||
runtimeTotal *prometheus.Desc
|
||||
runtimeUser *prometheus.Desc
|
||||
runtimeKernel *prometheus.Desc
|
||||
|
||||
// Network
|
||||
bytesReceived *prometheus.Desc
|
||||
bytesSent *prometheus.Desc
|
||||
packetsReceived *prometheus.Desc
|
||||
packetsSent *prometheus.Desc
|
||||
droppedPacketsIncoming *prometheus.Desc
|
||||
droppedPacketsOutgoing *prometheus.Desc
|
||||
|
||||
// Storage
|
||||
readCountNormalized *prometheus.Desc
|
||||
readSizeBytes *prometheus.Desc
|
||||
writeCountNormalized *prometheus.Desc
|
||||
writeSizeBytes *prometheus.Desc
|
||||
}
|
||||
|
||||
// New constructs a new Collector.
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
c.containerAvailable = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "available"),
|
||||
"Available",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.containersCount = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "count"),
|
||||
"Number of containers",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.usageCommitBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "memory_usage_commit_bytes"),
|
||||
"Memory Usage Commit Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.usageCommitPeakBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "memory_usage_commit_peak_bytes"),
|
||||
"Memory Usage Commit Peak Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.usagePrivateWorkingSetBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "memory_usage_private_working_set_bytes"),
|
||||
"Memory Usage Private Working Set Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.runtimeTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "cpu_usage_seconds_total"),
|
||||
"Total Run time in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.runtimeUser = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "cpu_usage_seconds_usermode"),
|
||||
"Run Time in User mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.runtimeKernel = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "cpu_usage_seconds_kernelmode"),
|
||||
"Run time in Kernel mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.bytesReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "network_receive_bytes_total"),
|
||||
"Bytes Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
)
|
||||
c.bytesSent = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "network_transmit_bytes_total"),
|
||||
"Bytes Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
)
|
||||
c.packetsReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "network_receive_packets_total"),
|
||||
"Packets Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
)
|
||||
c.packetsSent = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "network_transmit_packets_total"),
|
||||
"Packets Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
)
|
||||
c.droppedPacketsIncoming = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "network_receive_packets_dropped_total"),
|
||||
"Dropped Incoming Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
)
|
||||
c.droppedPacketsOutgoing = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "network_transmit_packets_dropped_total"),
|
||||
"Dropped Outgoing Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
)
|
||||
c.readCountNormalized = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "storage_read_count_normalized_total"),
|
||||
"Read Count Normalized",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.readSizeBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "storage_read_size_bytes_total"),
|
||||
"Read Size Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.writeCountNormalized = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "storage_write_count_normalized_total"),
|
||||
"Write Count Normalized",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
c.writeSizeBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "storage_write_size_bytes_total"),
|
||||
"Write Size Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(logger, ch); err != nil {
|
||||
logger.Error("failed collecting collector metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collect(logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
// The "Container" type is passed so that only container compute systems are returned.
|
||||
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
|
||||
if err != nil {
|
||||
logger.Error("Err in Getting containers",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
count := len(containers)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.containersCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
)
|
||||
|
||||
if count == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
containerPrefixes := make(map[string]string)
|
||||
collectErrors := make([]error, 0, len(containers))
|
||||
|
||||
for _, containerDetails := range containers {
|
||||
containerIdWithPrefix := getContainerIdWithPrefix(containerDetails)
|
||||
|
||||
if err = c.collectContainer(logger, ch, containerDetails, containerIdWithPrefix); err != nil {
|
||||
if hcsshim.IsNotExist(err) {
|
||||
logger.Debug("err in fetching container statistics",
|
||||
slog.String("container_id", containerDetails.ID),
|
||||
slog.Any("err", err),
|
||||
)
|
||||
} else {
|
||||
logger.Error("err in fetching container statistics",
|
||||
slog.String("container_id", containerDetails.ID),
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
collectErrors = append(collectErrors, err)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
containerPrefixes[containerDetails.ID] = containerIdWithPrefix
|
||||
}
|
||||
|
||||
if err = c.collectNetworkMetrics(logger, ch, containerPrefixes); err != nil {
|
||||
return fmt.Errorf("error in fetching container network statistics: %w", err)
|
||||
}
|
||||
|
||||
if len(collectErrors) > 0 {
|
||||
return fmt.Errorf("errors while fetching container statistics: %w", errors.Join(collectErrors...))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collectContainer(logger *slog.Logger, ch chan<- prometheus.Metric, containerDetails hcsshim.ContainerProperties, containerIdWithPrefix string) error {
|
||||
container, err := hcsshim.OpenContainer(containerDetails.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in opening container: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if container == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err := container.Close(); err != nil {
|
||||
logger.Error("error in closing container",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
}()
|
||||
|
||||
containerStats, err := container.Statistics()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in fetching container statistics: %w", err)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.containerAvailable,
|
||||
prometheus.CounterValue,
|
||||
1,
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.usageCommitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(containerStats.Memory.UsageCommitBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.usageCommitPeakBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(containerStats.Memory.UsageCommitPeakBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.usagePrivateWorkingSetBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(containerStats.Memory.UsagePrivateWorkingSetBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.runtimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(containerStats.Processor.TotalRuntime100ns)*perflib.TicksToSecondScaleFactor,
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.runtimeUser,
|
||||
prometheus.CounterValue,
|
||||
float64(containerStats.Processor.RuntimeUser100ns)*perflib.TicksToSecondScaleFactor,
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.runtimeKernel,
|
||||
prometheus.CounterValue,
|
||||
float64(containerStats.Processor.RuntimeKernel100ns)*perflib.TicksToSecondScaleFactor,
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.readCountNormalized,
|
||||
prometheus.CounterValue,
|
||||
float64(containerStats.Storage.ReadCountNormalized),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.readSizeBytes,
|
||||
prometheus.CounterValue,
|
||||
float64(containerStats.Storage.ReadSizeBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.writeCountNormalized,
|
||||
prometheus.CounterValue,
|
||||
float64(containerStats.Storage.WriteCountNormalized),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.writeSizeBytes,
|
||||
prometheus.CounterValue,
|
||||
float64(containerStats.Storage.WriteSizeBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
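The CPU runtime metrics emitted above convert 100-nanosecond tick counts into seconds via perflib.TicksToSecondScaleFactor. A brief illustration of that conversion with the scale factor written out explicitly; the value 1e-7 is assumed to match the constant.

// Windows reports processor runtimes in 100 ns units; multiplying by 1e-7 yields seconds.
const ticksToSecondScaleFactor = 1.0 / 1e7 // assumed value of perflib.TicksToSecondScaleFactor

func runtime100nsToSeconds(runtime100ns uint64) float64 {
	return float64(runtime100ns) * ticksToSecondScaleFactor
}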
|
||||
// collectNetworkMetrics collects network metrics for containers.
|
||||
// With HNSv2, the network stats must be collected from hcsshim.HNSListEndpointRequest.
|
||||
// Network statistics from container.Statistics() provide data only if HNSv1 is used.
|
||||
// Ref: https://github.com/prometheus-community/windows_exporter/pull/1218
|
||||
func (c *Collector) collectNetworkMetrics(logger *slog.Logger, ch chan<- prometheus.Metric, containerPrefixes map[string]string) error {
|
||||
hnsEndpoints, err := hcsshim.HNSListEndpointRequest()
|
||||
if err != nil {
|
||||
logger.Warn("Failed to collect network stats for containers")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if len(hnsEndpoints) == 0 {
|
||||
logger.Info("No network stats for containers to collect")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, endpoint := range hnsEndpoints {
|
||||
endpointStats, err := hcsshim.GetHNSEndpointStats(endpoint.Id)
|
||||
if err != nil {
|
||||
logger.Warn("Failed to collect network stats for interface "+endpoint.Id,
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
for _, containerId := range endpoint.SharedContainers {
|
||||
containerIdWithPrefix, ok := containerPrefixes[containerId]
|
||||
|
||||
if !ok {
|
||||
logger.Debug("Failed to collect network stats for container " + containerId)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
endpointId := strings.ToUpper(endpoint.Id)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(endpointStats.BytesReceived),
|
||||
containerIdWithPrefix, endpointId,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesSent,
|
||||
prometheus.CounterValue,
|
||||
float64(endpointStats.BytesSent),
|
||||
containerIdWithPrefix, endpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(endpointStats.PacketsReceived),
|
||||
containerIdWithPrefix, endpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(endpointStats.PacketsSent),
|
||||
containerIdWithPrefix, endpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.droppedPacketsIncoming,
|
||||
prometheus.CounterValue,
|
||||
float64(endpointStats.DroppedPacketsIncoming),
|
||||
containerIdWithPrefix, endpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.droppedPacketsOutgoing,
|
||||
prometheus.CounterValue,
|
||||
float64(endpointStats.DroppedPacketsOutgoing),
|
||||
containerIdWithPrefix, endpointId,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getContainerIdWithPrefix(containerDetails hcsshim.ContainerProperties) string {
|
||||
switch containerDetails.Owner {
|
||||
case "containerd-shim-runhcs-v1.exe":
|
||||
return "containerd://" + containerDetails.ID
|
||||
default:
|
||||
// default to docker or if owner is not set
|
||||
return "docker://" + containerDetails.ID
|
||||
}
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package container_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/container"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, container.Name, container.NewWithFlags)
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package cpu
|
||||
|
||||
// Processor performance counters.
|
||||
const (
|
||||
c1TimeSeconds = "% C1 Time"
|
||||
c2TimeSeconds = "% C2 Time"
|
||||
c3TimeSeconds = "% C3 Time"
|
||||
c1TransitionsTotal = "C1 Transitions/sec"
|
||||
c2TransitionsTotal = "C2 Transitions/sec"
|
||||
c3TransitionsTotal = "C3 Transitions/sec"
|
||||
clockInterruptsTotal = "Clock Interrupts/sec"
|
||||
dpcQueuedPerSecond = "DPCs Queued/sec"
|
||||
dpcTimeSeconds = "% DPC Time"
|
||||
idleBreakEventsTotal = "Idle Break Events/sec"
|
||||
idleTimeSeconds = "% Idle Time"
|
||||
interruptsTotal = "Interrupts/sec"
|
||||
interruptTimeSeconds = "% Interrupt Time"
|
||||
parkingStatus = "Parking Status"
|
||||
performanceLimitPercent = "% Performance Limit"
|
||||
priorityTimeSeconds = "% Priority Time"
|
||||
privilegedTimeSeconds = "% Privileged Time"
|
||||
privilegedUtilitySeconds = "% Privileged Utility"
|
||||
processorFrequencyMHz = "Processor Frequency"
|
||||
processorPerformance = "% Processor Performance"
|
||||
processorTimeSeconds = "% Processor Time"
|
||||
processorUtilityRate = "% Processor Utility"
|
||||
userTimeSeconds = "% User Time"
|
||||
)
|
||||
|
||||
type perflibProcessorInformation struct {
|
||||
Name string
|
||||
C1TimeSeconds float64 `perflib:"% C1 Time"`
|
||||
C2TimeSeconds float64 `perflib:"% C2 Time"`
|
||||
C3TimeSeconds float64 `perflib:"% C3 Time"`
|
||||
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
|
||||
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
|
||||
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
|
||||
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
|
||||
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
|
||||
DPCTimeSeconds float64 `perflib:"% DPC Time"`
|
||||
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
|
||||
IdleTimeSeconds float64 `perflib:"% Idle Time"`
|
||||
InterruptsTotal float64 `perflib:"Interrupts/sec"`
|
||||
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
|
||||
ParkingStatus float64 `perflib:"Parking Status"`
|
||||
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
|
||||
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
|
||||
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
|
||||
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
|
||||
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
|
||||
ProcessorPerformance float64 `perflib:"% Processor Performance"`
|
||||
ProcessorMPerf float64 `perflib:"% Processor Performance,secondvalue"`
|
||||
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
|
||||
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
|
||||
ProcessorRTC float64 `perflib:"% Processor Utility,secondvalue"`
|
||||
UserTimeSeconds float64 `perflib:"% User Time"`
|
||||
}
|
||||
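The ",secondvalue" tags above bind the second raw value of the same counter, for example the RTC tick base of "% Processor Utility". One common way to combine such a pair, sketched here as an assumption rather than as the exporter's own computation, is to divide the deltas of the two values between scrapes.

// utilitySample holds the two raw values read for "% Processor Utility".
type utilitySample struct {
	utility float64 // primary value
	rtc     float64 // ",secondvalue": the RTC tick base
}

// utilityPercent derives a utilization percentage from two consecutive samples.
func utilityPercent(prev, cur utilitySample) float64 {
	deltaRTC := cur.rtc - prev.rtc
	if deltaRTC == 0 {
		return 0
	}

	return 100 * (cur.utility - prev.utility) / deltaRTC
}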
@@ -1,582 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package cpu
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/utils"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "cpu"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
perfDataCollector *perfdata.Collector
|
||||
|
||||
processorRTCValues map[string]cpuCounter
|
||||
processorMPerfValues map[string]cpuCounter
|
||||
|
||||
logicalProcessors *prometheus.Desc
|
||||
cStateSecondsTotal *prometheus.Desc
|
||||
timeTotal *prometheus.Desc
|
||||
interruptsTotal *prometheus.Desc
|
||||
dpcsTotal *prometheus.Desc
|
||||
clockInterruptsTotal *prometheus.Desc
|
||||
idleBreakEventsTotal *prometheus.Desc
|
||||
parkingStatus *prometheus.Desc
|
||||
processorFrequencyMHz *prometheus.Desc
|
||||
processorPerformance *prometheus.Desc
|
||||
processorMPerf *prometheus.Desc
|
||||
processorRTC *prometheus.Desc
|
||||
processorUtility *prometheus.Desc
|
||||
processorPrivilegedUtility *prometheus.Desc
|
||||
}
|
||||
|
||||
type cpuCounter struct {
|
||||
lastValue uint32
|
||||
totalValue float64
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
if utils.PDHEnabled() {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return []string{"Processor Information"}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
if utils.PDHEnabled() {
|
||||
counters := []string{
|
||||
c1TimeSeconds,
|
||||
c2TimeSeconds,
|
||||
c3TimeSeconds,
|
||||
c1TransitionsTotal,
|
||||
c2TransitionsTotal,
|
||||
c3TransitionsTotal,
|
||||
clockInterruptsTotal,
|
||||
dpcQueuedPerSecond,
|
||||
dpcTimeSeconds,
|
||||
idleBreakEventsTotal,
|
||||
idleTimeSeconds,
|
||||
interruptsTotal,
|
||||
interruptTimeSeconds,
|
||||
parkingStatus,
|
||||
performanceLimitPercent,
|
||||
priorityTimeSeconds,
|
||||
privilegedTimeSeconds,
|
||||
privilegedUtilitySeconds,
|
||||
processorFrequencyMHz,
|
||||
processorPerformance,
|
||||
processorTimeSeconds,
|
||||
processorUtilityRate,
|
||||
userTimeSeconds,
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = perfdata.NewCollector("Processor Information", []string{"*"}, counters)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Processor Information collector: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
c.logicalProcessors = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "logical_processor"),
|
||||
"Total number of logical processors",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
c.cStateSecondsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
[]string{"core", "state"},
|
||||
nil,
|
||||
)
|
||||
c.timeTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "time_total"),
|
||||
"Time that processor spent in different modes (dpc, idle, interrupt, privileged, user)",
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
)
|
||||
c.interruptsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.dpcsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "dpcs_total"),
|
||||
"Total number of received and serviced deferred procedure calls (DPCs)",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.clockInterruptsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "clock_interrupts_total"),
|
||||
"Total number of received and serviced clock tick interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.idleBreakEventsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "idle_break_events_total"),
|
||||
"Total number of time processor was woken from idle",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.parkingStatus = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "parking_status"),
|
||||
"Parking Status represents whether a processor is parked or not",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.processorFrequencyMHz = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "core_frequency_mhz"),
|
||||
"Core frequency in megahertz",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.processorPerformance = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "processor_performance_total"),
|
||||
"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.processorMPerf = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "processor_mperf_total"),
|
||||
"Processor MPerf is the number of TSC ticks incremented while executing instructions",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.processorRTC = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "processor_rtc_total"),
|
||||
"Processor RTC represents the number of RTC ticks made since the system booted. It should consistently be 64e6, and can be used to properly derive Processor Utility Rate",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.processorUtility = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "processor_utility_total"),
|
||||
"Processor Utility represents is the amount of time the core spends executing instructions",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
c.processorPrivilegedUtility = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "processor_privileged_utility_total"),
|
||||
"Processor Privileged Utility represents is the amount of time the core has spent executing instructions inside the kernel",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.processorRTCValues = map[string]cpuCounter{}
|
||||
c.processorMPerfValues = map[string]cpuCounter{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
if utils.PDHEnabled() {
|
||||
return c.collectPDH(ch)
|
||||
}
|
||||
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
return c.collectFull(ctx, logger, ch)
|
||||
}
|
||||
|
||||
func (c *Collector) collectFull(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessorInformation, 0)
|
||||
|
||||
err := perflib.UnmarshalObject(ctx.PerfObjects["Processor Information"], &data, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var coreCount float64
|
||||
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
|
||||
core := cpu.Name
|
||||
|
||||
if val, ok := c.processorRTCValues[core]; ok {
|
||||
c.processorRTCValues[core] = cpuCounter{
|
||||
uint32(cpu.ProcessorRTC),
|
||||
val.totalValue + float64(uint32(cpu.ProcessorRTC)-val.lastValue),
|
||||
}
|
||||
} else {
|
||||
c.processorRTCValues[core] = cpuCounter{
|
||||
uint32(cpu.ProcessorRTC),
|
||||
0,
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := c.processorMPerfValues[core]; ok {
|
||||
c.processorMPerfValues[core] = cpuCounter{
|
||||
uint32(cpu.ProcessorMPerf),
|
||||
val.totalValue + float64(uint32(cpu.ProcessorMPerf)-val.lastValue),
|
||||
}
|
||||
} else {
|
||||
c.processorMPerfValues[core] = cpuCounter{
|
||||
uint32(cpu.ProcessorMPerf),
|
||||
0,
|
||||
}
|
||||
}
|
||||
|
||||
coreCount++
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C1TimeSeconds,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C2TimeSeconds,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C3TimeSeconds,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleTimeSeconds,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptTimeSeconds,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCTimeSeconds,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PrivilegedTimeSeconds,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.UserTimeSeconds,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.interruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dpcsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCsQueuedTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.clockInterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.ClockInterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.idleBreakEventsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleBreakEventsTotal,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.parkingStatus,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ParkingStatus,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorFrequencyMHz,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorFrequencyMHz,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorPerformance,
|
||||
prometheus.CounterValue,
|
||||
cpu.ProcessorPerformance,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorMPerf,
|
||||
prometheus.CounterValue,
|
||||
c.processorMPerfValues[core].totalValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorRTC,
|
||||
prometheus.CounterValue,
|
||||
c.processorRTCValues[core].totalValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorUtility,
|
||||
prometheus.CounterValue,
|
||||
cpu.ProcessorUtilityRate,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorPrivilegedUtility,
|
||||
prometheus.CounterValue,
|
||||
cpu.PrivilegedUtilitySeconds,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logicalProcessors,
|
||||
prometheus.GaugeValue,
|
||||
coreCount,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
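Both collectFull above and collectPDH below accumulate the MPerf and RTC counters with the same pattern: the raw value is truncated to uint32, and the unsigned subtraction keeps the running total monotonic even when the 32-bit value wraps between scrapes. A compact restatement of that pattern follows; names other than cpuCounter are illustrative.

// accumulate folds a freshly scraped raw value into the running counter.
func accumulate(prev cpuCounter, raw float64) cpuCounter {
	cur := uint32(raw)

	return cpuCounter{
		lastValue:  cur,
		totalValue: prev.totalValue + float64(cur-prev.lastValue), // wrap-safe unsigned delta
	}
}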
|
||||
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
|
||||
data, err := c.perfDataCollector.Collect()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect Processor Information metrics: %w", err)
|
||||
}
|
||||
|
||||
var coreCount float64
|
||||
|
||||
for core, coreData := range data {
|
||||
coreCount++
|
||||
|
||||
if val, ok := c.processorRTCValues[core]; ok {
|
||||
c.processorRTCValues[core] = cpuCounter{
|
||||
uint32(coreData[privilegedUtilitySeconds].SecondValue),
|
||||
val.totalValue + float64(uint32(coreData[privilegedUtilitySeconds].SecondValue)-val.lastValue),
|
||||
}
|
||||
} else {
|
||||
c.processorRTCValues[core] = cpuCounter{
|
||||
uint32(coreData[privilegedUtilitySeconds].SecondValue),
|
||||
0,
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := c.processorMPerfValues[core]; ok {
|
||||
c.processorMPerfValues[core] = cpuCounter{
|
||||
uint32(coreData[processorPerformance].SecondValue),
|
||||
val.totalValue + float64(uint32(coreData[processorPerformance].SecondValue)-val.lastValue),
|
||||
}
|
||||
} else {
|
||||
c.processorMPerfValues[core] = cpuCounter{
|
||||
uint32(coreData[processorPerformance].SecondValue),
|
||||
0,
|
||||
}
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[c1TimeSeconds].FirstValue,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[c2TimeSeconds].FirstValue,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[c3TimeSeconds].FirstValue,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[idleTimeSeconds].FirstValue,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[interruptTimeSeconds].FirstValue,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[dpcTimeSeconds].FirstValue,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[privilegedTimeSeconds].FirstValue,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[userTimeSeconds].FirstValue,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.interruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[interruptsTotal].FirstValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dpcsTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[dpcQueuedPerSecond].FirstValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.clockInterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[clockInterruptsTotal].FirstValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.idleBreakEventsTotal,
|
||||
prometheus.CounterValue,
|
||||
coreData[idleBreakEventsTotal].FirstValue,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.parkingStatus,
|
||||
prometheus.GaugeValue,
|
||||
coreData[parkingStatus].FirstValue,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorFrequencyMHz,
|
||||
prometheus.GaugeValue,
|
||||
coreData[processorFrequencyMHz].FirstValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorPerformance,
|
||||
prometheus.CounterValue,
|
||||
coreData[processorPerformance].FirstValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorMPerf,
|
||||
prometheus.CounterValue,
|
||||
coreData[processorPerformance].SecondValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorRTC,
|
||||
prometheus.CounterValue,
|
||||
coreData[processorUtilityRate].SecondValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorUtility,
|
||||
prometheus.CounterValue,
|
||||
coreData[processorUtilityRate].FirstValue,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processorPrivilegedUtility,
|
||||
prometheus.CounterValue,
|
||||
coreData[privilegedUtilitySeconds].FirstValue,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logicalProcessors,
|
||||
prometheus.GaugeValue,
|
||||
coreCount,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package cpu_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cpu"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, cpu.Name, cpu.NewWithFlags)
|
||||
}
|
||||
@@ -1,229 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package cpu_info
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "cpu_info"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for a few WMI metrics in Win32_Processor.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
wmiClient *wmi.Client
|
||||
|
||||
cpuInfo *prometheus.Desc
|
||||
cpuCoreCount *prometheus.Desc
|
||||
cpuEnabledCoreCount *prometheus.Desc
|
||||
cpuLogicalProcessorsCount *prometheus.Desc
|
||||
cpuThreadCount *prometheus.Desc
|
||||
cpuL2CacheSize *prometheus.Desc
|
||||
cpuL3CacheSize *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
|
||||
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
|
||||
return errors.New("wmiClient or SWbemServicesClient is nil")
|
||||
}
|
||||
|
||||
c.wmiClient = wmiClient
|
||||
c.cpuInfo = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, "", Name),
|
||||
"Labelled CPU information as provided by Win32_Processor",
|
||||
[]string{
|
||||
"architecture",
|
||||
"device_id",
|
||||
"description",
|
||||
"family",
|
||||
"name",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
c.cpuThreadCount = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "thread"),
|
||||
"Number of threads per CPU",
|
||||
[]string{
|
||||
"device_id",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
c.cpuCoreCount = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "core"),
|
||||
"Number of cores per CPU",
|
||||
[]string{
|
||||
"device_id",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
c.cpuEnabledCoreCount = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "enabled_core"),
|
||||
"Number of enabled cores per CPU",
|
||||
[]string{
|
||||
"device_id",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
c.cpuLogicalProcessorsCount = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "logical_processor"),
|
||||
"Number of logical processors per CPU",
|
||||
[]string{
|
||||
"device_id",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
c.cpuL2CacheSize = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "l2_cache_size"),
|
||||
"Size of L2 cache per CPU",
|
||||
[]string{
|
||||
"device_id",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
c.cpuL3CacheSize = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "l3_cache_size"),
|
||||
"Size of L3 cache per CPU",
|
||||
[]string{
|
||||
"device_id",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type win32Processor struct {
|
||||
Architecture uint32
|
||||
DeviceID string
|
||||
Description string
|
||||
Family uint16
|
||||
L2CacheSize uint32
|
||||
L3CacheSize uint32
|
||||
Name string
|
||||
ThreadCount uint32
|
||||
NumberOfCores uint32
|
||||
NumberOfEnabledCore uint32
|
||||
NumberOfLogicalProcessors uint32
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ch); err != nil {
|
||||
logger.Error("failed collecting cpu_info metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
|
||||
var dst []win32Processor
|
||||
// We use a static query here because the provided methods in wmi.go all issue a SELECT *;
|
||||
// This results in the time-consuming LoadPercentage field being read which seems to measure each CPU
|
||||
// serially over a 1 second interval, so the scrape time is at least 1s * num_sockets
|
||||
if err := c.wmiClient.Query("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(dst) == 0 {
|
||||
return errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
// Some CPUs end up exposing trailing spaces for certain strings, so clean them up
|
||||
for _, processor := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cpuInfo,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
strconv.Itoa(int(processor.Architecture)),
|
||||
strings.TrimRight(processor.DeviceID, " "),
|
||||
strings.TrimRight(processor.Description, " "),
|
||||
strconv.Itoa(int(processor.Family)),
|
||||
strings.TrimRight(processor.Name, " "),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cpuCoreCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(processor.NumberOfCores),
|
||||
strings.TrimRight(processor.DeviceID, " "),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cpuEnabledCoreCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(processor.NumberOfEnabledCore),
|
||||
strings.TrimRight(processor.DeviceID, " "),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cpuLogicalProcessorsCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(processor.NumberOfLogicalProcessors),
|
||||
strings.TrimRight(processor.DeviceID, " "),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cpuThreadCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(processor.ThreadCount),
|
||||
strings.TrimRight(processor.DeviceID, " "),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cpuL2CacheSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(processor.L2CacheSize),
|
||||
strings.TrimRight(processor.DeviceID, " "),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cpuL3CacheSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(processor.L3CacheSize),
|
||||
strings.TrimRight(processor.DeviceID, " "),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package cpu_info_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cpu_info"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, cpu_info.Name, cpu_info.NewWithFlags)
|
||||
}
|
||||
@@ -1,159 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package cs
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/headers/sysinfoapi"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "cs"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for WMI metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
// physicalMemoryBytes
|
||||
// Deprecated: Use windows_physical_memory_total_bytes instead
|
||||
physicalMemoryBytes *prometheus.Desc
|
||||
// logicalProcessors
|
||||
// Deprecated: Use windows_cpu_logical_processor instead
|
||||
logicalProcessors *prometheus.Desc
|
||||
// hostname
|
||||
// Deprecated: Use windows_os_hostname instead
|
||||
hostname *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
|
||||
logger.Warn("The cs collector is deprecated and will be removed in a future release. " +
|
||||
"Logical processors has been moved to cpu_info collector. " +
|
||||
"Physical memory has been moved to memory collector. " +
|
||||
"Hostname has been moved to os collector.")
|
||||
|
||||
c.logicalProcessors = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "logical_processors"),
|
||||
"Deprecated: Use windows_cpu_logical_processor instead",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.physicalMemoryBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "physical_memory_bytes"),
|
||||
"Deprecated: Use windows_physical_memory_total_bytes instead",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.hostname = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "hostname"),
|
||||
"Deprecated: Use windows_os_hostname instead",
|
||||
[]string{
|
||||
"hostname",
|
||||
"domain",
|
||||
"fqdn",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
if err := c.collect(ch); err != nil {
|
||||
logger.Error("failed collecting cs metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
|
||||
// Get systeminfo for number of processors
|
||||
systemInfo := sysinfoapi.GetSystemInfo()
|
||||
|
||||
// Get memory status for physical memory
|
||||
mem, err := sysinfoapi.GlobalMemoryStatusEx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logicalProcessors,
|
||||
prometheus.GaugeValue,
|
||||
float64(systemInfo.NumberOfProcessors),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.physicalMemoryBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(mem.TotalPhys),
|
||||
)
|
||||
|
||||
hostname, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSHostname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
domain, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSDomain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fqdn, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSFullyQualified)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.hostname,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
hostname,
|
||||
domain,
|
||||
fqdn,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package cs_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/cs"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, cs.Name, cs.NewWithFlags)
}
|
||||
@@ -1,101 +0,0 @@
|
||||
package dfsr
|
||||
|
||||
const (
|
||||
// Connection Perflib: "DFS Replication Service Connections".
|
||||
bytesReceivedTotal = "Total Bytes Received"
|
||||
|
||||
// Folder Perflib: "DFS Replicated Folder".
|
||||
bandwidthSavingsUsingDFSReplicationTotal = "Bandwidth Savings Using DFS Replication"
|
||||
compressedSizeOfFilesReceivedTotal = "Compressed Size of Files Received"
|
||||
conflictBytesCleanedUpTotal = "Conflict Bytes Cleaned Up"
|
||||
conflictBytesGeneratedTotal = "Conflict Bytes Generated"
|
||||
conflictFilesCleanedUpTotal = "Conflict Files Cleaned Up"
|
||||
conflictFilesGeneratedTotal = "Conflict Files Generated"
|
||||
conflictFolderCleanupsCompletedTotal = "Conflict folder Cleanups Completed"
|
||||
conflictSpaceInUse = "Conflict Space In Use"
|
||||
deletedSpaceInUse = "Deleted Space In Use"
|
||||
deletedBytesCleanedUpTotal = "Deleted Bytes Cleaned Up"
|
||||
deletedBytesGeneratedTotal = "Deleted Bytes Generated"
|
||||
deletedFilesCleanedUpTotal = "Deleted Files Cleaned Up"
|
||||
deletedFilesGeneratedTotal = "Deleted Files Generated"
|
||||
fileInstallsRetriedTotal = "File Installs Retried"
|
||||
fileInstallsSucceededTotal = "File Installs Succeeded"
|
||||
filesReceivedTotal = "Total Files Received"
|
||||
rdcBytesReceivedTotal = "RDC Bytes Received"
|
||||
rdcCompressedSizeOfFilesReceivedTotal = "RDC Compressed Size of Files Received"
|
||||
rdcNumberOfFilesReceivedTotal = "RDC Number of Files Received"
|
||||
rdcSizeOfFilesReceivedTotal = "RDC Size of Files Received"
|
||||
sizeOfFilesReceivedTotal = "Size of Files Received"
|
||||
stagingSpaceInUse = "Staging Space In Use"
|
||||
stagingBytesCleanedUpTotal = "Staging Bytes Cleaned Up"
|
||||
stagingBytesGeneratedTotal = "Staging Bytes Generated"
|
||||
stagingFilesCleanedUpTotal = "Staging Files Cleaned Up"
|
||||
stagingFilesGeneratedTotal = "Staging Files Generated"
|
||||
updatesDroppedTotal = "Updates Dropped"
|
||||
|
||||
// Volume Perflib: "DFS Replication Service Volumes".
|
||||
databaseCommitsTotal = "Database Commits"
|
||||
databaseLookupsTotal = "Database Lookups"
|
||||
usnJournalRecordsReadTotal = "USN Journal Records Read"
|
||||
usnJournalRecordsAcceptedTotal = "USN Journal Records Accepted"
|
||||
usnJournalUnreadPercentage = "USN Journal Records Unread Percentage"
|
||||
)
|
||||
|
||||
// PerflibDFSRConnection Perflib: "DFS Replication Service Connections".
|
||||
type PerflibDFSRConnection struct {
|
||||
Name string
|
||||
|
||||
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
|
||||
BytesReceivedTotal float64 `perflib:"Total Bytes Received"`
|
||||
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
|
||||
FilesReceivedTotal float64 `perflib:"Total Files Received"`
|
||||
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
|
||||
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
|
||||
RDCNumberOfFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
|
||||
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
|
||||
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
|
||||
}
|
||||
|
||||
// perflibDFSRFolder Perflib: "DFS Replicated Folder".
|
||||
type perflibDFSRFolder struct {
|
||||
Name string
|
||||
|
||||
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
|
||||
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
|
||||
ConflictBytesCleanedUpTotal float64 `perflib:"Conflict Bytes Cleaned Up"`
|
||||
ConflictBytesGeneratedTotal float64 `perflib:"Conflict Bytes Generated"`
|
||||
ConflictFilesCleanedUpTotal float64 `perflib:"Conflict Files Cleaned Up"`
|
||||
ConflictFilesGeneratedTotal float64 `perflib:"Conflict Files Generated"`
|
||||
ConflictFolderCleanupsCompletedTotal float64 `perflib:"Conflict folder Cleanups Completed"`
|
||||
ConflictSpaceInUse float64 `perflib:"Conflict Space In Use"`
|
||||
DeletedSpaceInUse float64 `perflib:"Deleted Space In Use"`
|
||||
DeletedBytesCleanedUpTotal float64 `perflib:"Deleted Bytes Cleaned Up"`
|
||||
DeletedBytesGeneratedTotal float64 `perflib:"Deleted Bytes Generated"`
|
||||
DeletedFilesCleanedUpTotal float64 `perflib:"Deleted Files Cleaned Up"`
|
||||
DeletedFilesGeneratedTotal float64 `perflib:"Deleted Files Generated"`
|
||||
FileInstallsRetriedTotal float64 `perflib:"File Installs Retried"`
|
||||
FileInstallsSucceededTotal float64 `perflib:"File Installs Succeeded"`
|
||||
FilesReceivedTotal float64 `perflib:"Total Files Received"`
|
||||
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
|
||||
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
|
||||
RDCNumberOfFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
|
||||
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
|
||||
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
|
||||
StagingSpaceInUse float64 `perflib:"Staging Space In Use"`
|
||||
StagingBytesCleanedUpTotal float64 `perflib:"Staging Bytes Cleaned Up"`
|
||||
StagingBytesGeneratedTotal float64 `perflib:"Staging Bytes Generated"`
|
||||
StagingFilesCleanedUpTotal float64 `perflib:"Staging Files Cleaned Up"`
|
||||
StagingFilesGeneratedTotal float64 `perflib:"Staging Files Generated"`
|
||||
UpdatesDroppedTotal float64 `perflib:"Updates Dropped"`
|
||||
}
|
||||
|
||||
// perflibDFSRVolume Perflib: "DFS Replication Service Volumes".
|
||||
type perflibDFSRVolume struct {
|
||||
Name string
|
||||
|
||||
DatabaseCommitsTotal float64 `perflib:"Database Commits"`
|
||||
DatabaseLookupsTotal float64 `perflib:"Database Lookups"`
|
||||
USNJournalRecordsReadTotal float64 `perflib:"USN Journal Records Read"`
|
||||
USNJournalRecordsAcceptedTotal float64 `perflib:"USN Journal Records Accepted"`
|
||||
USNJournalUnreadPercentage float64 `perflib:"USN Journal Records Unread Percentage"`
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,12 +0,0 @@
|
||||
package dfsr_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/dfsr"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, dfsr.Name, dfsr.NewWithFlags)
}
|
||||
@@ -1,426 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package dhcp
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "dhcp"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for perflib DHCP metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
acksTotal *prometheus.Desc
|
||||
activeQueueLength *prometheus.Desc
|
||||
conflictCheckQueueLength *prometheus.Desc
|
||||
declinesTotal *prometheus.Desc
|
||||
deniedDueToMatch *prometheus.Desc
|
||||
deniedDueToNonMatch *prometheus.Desc
|
||||
discoversTotal *prometheus.Desc
|
||||
duplicatesDroppedTotal *prometheus.Desc
|
||||
failoverBndackReceivedTotal *prometheus.Desc
|
||||
failoverBndackSentTotal *prometheus.Desc
|
||||
failoverBndupdDropped *prometheus.Desc
|
||||
failoverBndupdPendingOutboundQueue *prometheus.Desc
|
||||
failoverBndupdReceivedTotal *prometheus.Desc
|
||||
failoverBndupdSentTotal *prometheus.Desc
|
||||
failoverTransitionsCommunicationInterruptedState *prometheus.Desc
|
||||
failoverTransitionsPartnerDownState *prometheus.Desc
|
||||
failoverTransitionsRecoverState *prometheus.Desc
|
||||
informsTotal *prometheus.Desc
|
||||
nACKsTotal *prometheus.Desc
|
||||
offerQueueLength *prometheus.Desc
|
||||
offersTotal *prometheus.Desc
|
||||
packetsExpiredTotal *prometheus.Desc
|
||||
packetsReceivedTotal *prometheus.Desc
|
||||
releasesTotal *prometheus.Desc
|
||||
requestsTotal *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{"DHCP Server"}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
c.packetsReceivedTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
|
||||
"Total number of packets received by the DHCP server (PacketsReceivedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.duplicatesDroppedTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "duplicates_dropped_total"),
|
||||
"Total number of duplicate packets received by the DHCP server (DuplicatesDroppedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.packetsExpiredTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_expired_total"),
|
||||
"Total number of packets expired in the DHCP server message queue (PacketsExpiredTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.activeQueueLength = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "active_queue_length"),
|
||||
"Number of packets in the processing queue of the DHCP server (ActiveQueueLength)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.conflictCheckQueueLength = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "conflict_check_queue_length"),
|
||||
"Number of packets in the DHCP server queue waiting on conflict detection (ping). (ConflictCheckQueueLength)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.discoversTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "discovers_total"),
|
||||
"Total DHCP Discovers received by the DHCP server (DiscoversTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.offersTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "offers_total"),
|
||||
"Total DHCP Offers sent by the DHCP server (OffersTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.requestsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
|
||||
"Total DHCP Requests received by the DHCP server (RequestsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.informsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "informs_total"),
|
||||
"Total DHCP Informs received by the DHCP server (InformsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.acksTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "acks_total"),
|
||||
"Total DHCP Acks sent by the DHCP server (AcksTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.nACKsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "nacks_total"),
|
||||
"Total DHCP Nacks sent by the DHCP server (NacksTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.declinesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "declines_total"),
|
||||
"Total DHCP Declines received by the DHCP server (DeclinesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.releasesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "releases_total"),
|
||||
"Total DHCP Releases received by the DHCP server (ReleasesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.offerQueueLength = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "offer_queue_length"),
|
||||
"Number of packets in the offer queue of the DHCP server (OfferQueueLength)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.deniedDueToMatch = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "denied_due_to_match_total"),
|
||||
"Total number of DHCP requests denied, based on matches from the Deny list (DeniedDueToMatch)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.deniedDueToNonMatch = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "denied_due_to_nonmatch_total"),
|
||||
"Total number of DHCP requests denied, based on non-matches from the Allow list (DeniedDueToNonMatch)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.failoverBndupdSentTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_sent_total"),
|
||||
"Number of DHCP fail over Binding Update messages sent (FailoverBndupdSentTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.failoverBndupdReceivedTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_received_total"),
|
||||
"Number of DHCP fail over Binding Update messages received (FailoverBndupdReceivedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.failoverBndackSentTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_sent_total"),
|
||||
"Number of DHCP fail over Binding Ack messages sent (FailoverBndackSentTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.failoverBndackReceivedTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "failover_bndack_received_total"),
|
||||
"Number of DHCP fail over Binding Ack messages received (FailoverBndackReceivedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.failoverBndupdPendingOutboundQueue = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_pending_in_outbound_queue"),
|
||||
"Number of pending outbound DHCP fail over Binding Update messages (FailoverBndupdPendingOutboundQueue)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.failoverTransitionsCommunicationInterruptedState = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_communicationinterrupted_state_total"),
|
||||
"Total number of transitions into COMMUNICATION INTERRUPTED state (FailoverTransitionsCommunicationinterruptedState)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.failoverTransitionsPartnerDownState = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_partnerdown_state_total"),
|
||||
"Total number of transitions into PARTNER DOWN state (FailoverTransitionsPartnerdownState)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.failoverTransitionsRecoverState = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "failover_transitions_recover_total"),
|
||||
"Total number of transitions into RECOVER state (FailoverTransitionsRecoverState)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.failoverBndupdDropped = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "failover_bndupd_dropped_total"),
|
||||
"Total number of DHCP fail over Binding Updates dropped (FailoverBndupdDropped)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dhcpPerf represents perflib metrics from the DHCP Server class.
// While the names of several perflib counters suggest a rate (e.g. "Packets Received/sec"),
// perflib actually returns a raw counter, hence the "Total" suffix on some of the field names.
// A short illustrative sketch follows the struct below.
|
||||
type dhcpPerf struct {
|
||||
PacketsReceivedTotal float64 `perflib:"Packets Received/sec"`
|
||||
DuplicatesDroppedTotal float64 `perflib:"Duplicates Dropped/sec"`
|
||||
PacketsExpiredTotal float64 `perflib:"Packets Expired/sec"`
|
||||
ActiveQueueLength float64 `perflib:"Active Queue Length"`
|
||||
ConflictCheckQueueLength float64 `perflib:"Conflict Check Queue Length"`
|
||||
DiscoversTotal float64 `perflib:"Discovers/sec"`
|
||||
OffersTotal float64 `perflib:"Offers/sec"`
|
||||
RequestsTotal float64 `perflib:"Requests/sec"`
|
||||
InformsTotal float64 `perflib:"Informs/sec"`
|
||||
AcksTotal float64 `perflib:"Acks/sec"`
|
||||
NacksTotal float64 `perflib:"Nacks/sec"`
|
||||
DeclinesTotal float64 `perflib:"Declines/sec"`
|
||||
ReleasesTotal float64 `perflib:"Releases/sec"`
|
||||
DeniedDueToMatch float64 `perflib:"Denied due to match."`
|
||||
DeniedDueToNonMatch float64 `perflib:"Denied due to nonmatch."`
|
||||
OfferQueueLength float64 `perflib:"Offer Queue Length"`
|
||||
FailoverBndupdSentTotal float64 `perflib:"Failover: BndUpd sent/sec."`
|
||||
FailoverBndupdReceivedTotal float64 `perflib:"Failover: BndUpd received/sec."`
|
||||
FailoverBndackSentTotal float64 `perflib:"Failover: BndAck sent/sec."`
|
||||
FailoverBndackReceivedTotal float64 `perflib:"Failover: BndAck received/sec."`
|
||||
FailoverBndupdPendingOutboundQueue float64 `perflib:"Failover: BndUpd pending in outbound queue."`
|
||||
FailoverTransitionsCommunicationinterruptedState float64 `perflib:"Failover: Transitions to COMMUNICATION-INTERRUPTED state."`
|
||||
FailoverTransitionsPartnerdownState float64 `perflib:"Failover: Transitions to PARTNER-DOWN state."`
|
||||
FailoverTransitionsRecoverState float64 `perflib:"Failover: Transitions to RECOVER state."`
|
||||
FailoverBndupdDropped float64 `perflib:"Failover: BndUpd Dropped."`
|
||||
}
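As noted in the comment above this struct, perflib hands back raw counters even for counters whose names end in "/sec". The sketch below is not part of the diff; it shows the tag-driven unmarshalling pattern the file relies on, assuming the same imports as dhcp.go plus "errors". Converting the counter into a per-second rate is left to Prometheus (rate() at query time).

// Minimal sketch: one counter, matched to the struct field via its perflib tag.
type demoPerf struct {
	PacketsReceivedTotal float64 `perflib:"Packets Received/sec"` // raw counter despite the "/sec" name
}

func rawPacketsReceived(ctx *types.ScrapeContext, logger *slog.Logger) (float64, error) {
	var rows []demoPerf
	if err := perflib.UnmarshalObject(ctx.PerfObjects["DHCP Server"], &rows, logger); err != nil {
		return 0, err
	}

	if len(rows) == 0 {
		return 0, errors.New("no DHCP Server perflib data")
	}

	// Exposed with prometheus.CounterValue further below; never pre-divided by time.
	return rows[0].PacketsReceivedTotal, nil
}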
|
||||
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var dhcpPerfs []dhcpPerf
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["DHCP Server"], &dhcpPerfs, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].PacketsReceivedTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.duplicatesDroppedTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].DuplicatesDroppedTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsExpiredTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].PacketsExpiredTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.activeQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
dhcpPerfs[0].ActiveQueueLength,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.conflictCheckQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
dhcpPerfs[0].ConflictCheckQueueLength,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.discoversTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].DiscoversTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.offersTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].OffersTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.requestsTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].RequestsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.informsTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].InformsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.acksTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].AcksTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nACKsTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].NacksTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.declinesTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].DeclinesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.releasesTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].ReleasesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.offerQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
dhcpPerfs[0].OfferQueueLength,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.deniedDueToMatch,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].DeniedDueToMatch,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.deniedDueToNonMatch,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].DeniedDueToNonMatch,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.failoverBndupdSentTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].FailoverBndupdSentTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.failoverBndupdReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].FailoverBndupdReceivedTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.failoverBndackSentTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].FailoverBndackSentTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.failoverBndackReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].FailoverBndackReceivedTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.failoverBndupdPendingOutboundQueue,
|
||||
prometheus.GaugeValue,
|
||||
dhcpPerfs[0].FailoverBndupdPendingOutboundQueue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.failoverTransitionsCommunicationInterruptedState,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].FailoverTransitionsCommunicationinterruptedState,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.failoverTransitionsPartnerDownState,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].FailoverTransitionsPartnerdownState,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.failoverTransitionsRecoverState,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].FailoverTransitionsRecoverState,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.failoverBndupdDropped,
|
||||
prometheus.CounterValue,
|
||||
dhcpPerfs[0].FailoverBndupdDropped,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package dhcp_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/dhcp"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, dhcp.Name, dhcp.NewWithFlags)
}
|
||||
@@ -1,243 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package diskdrive
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const (
|
||||
Name = "diskdrive"
|
||||
win32DiskQuery = "SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive"
|
||||
)
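One point worth spelling out about win32DiskQuery: the yusufpapurcu/wmi client used here fills the destination slice by matching exported struct field names against the properties in the SELECT list, which is why the win32_DiskDrive struct further down mirrors the query. A small self-contained sketch of that contract (type and function names are hypothetical; only client.Query is the real API used in this file):

// Hypothetical helper illustrating the name-matching contract.
type diskDriveRow struct {
	DeviceID   string
	Model      string
	Size       uint64
	Partitions uint32
}

func queryDiskDrives(client *wmi.Client) ([]diskDriveRow, error) {
	var rows []diskDriveRow
	// The exported field names above must match the selected WMI properties.
	if err := client.Query("SELECT DeviceID, Model, Size, Partitions FROM Win32_DiskDrive", &rows); err != nil {
		return nil, err
	}

	return rows, nil
}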
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for a few WMI metrics in Win32_DiskDrive.
|
||||
type Collector struct {
|
||||
config Config
|
||||
wmiClient *wmi.Client
|
||||
|
||||
availability *prometheus.Desc
|
||||
diskInfo *prometheus.Desc
|
||||
partitions *prometheus.Desc
|
||||
size *prometheus.Desc
|
||||
status *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
|
||||
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
|
||||
return errors.New("wmiClient or SWbemServicesClient is nil")
|
||||
}
|
||||
|
||||
c.wmiClient = wmiClient
|
||||
c.diskInfo = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "info"),
|
||||
"General drive information",
|
||||
[]string{
|
||||
"device_id",
|
||||
"model",
|
||||
"caption",
|
||||
"name",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
c.status = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "status"),
|
||||
"Status of the drive",
|
||||
[]string{"name", "status"},
|
||||
nil,
|
||||
)
|
||||
c.size = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "size"),
|
||||
"Size of the disk drive. It is calculated by multiplying the total number of cylinders, tracks in each cylinder, sectors in each track, and bytes in each sector.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.partitions = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "partitions"),
|
||||
"Number of partitions",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.availability = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "availability"),
|
||||
"Availability Status",
|
||||
[]string{"name", "availability"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type win32_DiskDrive struct {
|
||||
DeviceID string
|
||||
Model string
|
||||
Size uint64
|
||||
Name string
|
||||
Caption string
|
||||
Partitions uint32
|
||||
Status string
|
||||
Availability uint16
|
||||
}
|
||||
|
||||
var (
|
||||
allDiskStatus = []string{
|
||||
"OK",
|
||||
"Error",
|
||||
"Degraded",
|
||||
"Unknown",
|
||||
"Pred fail",
|
||||
"Starting",
|
||||
"Stopping",
|
||||
"Service",
|
||||
"Stressed",
|
||||
"Nonrecover",
|
||||
"No Contact",
|
||||
"Lost Comm",
|
||||
}
|
||||
|
||||
availMap = map[int]string{
|
||||
1: "Other",
|
||||
2: "Unknown",
|
||||
3: "Running / Full Power",
|
||||
4: "Warning",
|
||||
5: "In Test",
|
||||
6: "Not Applicable",
|
||||
7: "Power Off",
|
||||
8: "Off line",
|
||||
9: "Off Duty",
|
||||
10: "Degraded",
|
||||
11: "Not Installed",
|
||||
12: "Install Error",
|
||||
13: "Power Save - Unknown",
|
||||
14: "Power Save - Low Power Mode",
|
||||
15: "Power Save - Standby",
|
||||
16: "Power Cycle",
|
||||
17: "Power Save - Warning",
|
||||
18: "Paused",
|
||||
19: "Not Ready",
|
||||
20: "Not Configured",
|
||||
21: "Quiesced",
|
||||
}
|
||||
)
|
||||
|
||||
// Collect sends the metric values for each metric to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ch); err != nil {
|
||||
logger.Error("failed collecting disk_drive_info metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
|
||||
var dst []win32_DiskDrive
|
||||
|
||||
if err := c.wmiClient.Query(win32DiskQuery, &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(dst) == 0 {
|
||||
return errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
for _, disk := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.diskInfo,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
strings.Trim(disk.DeviceID, "\\.\\"), //nolint:staticcheck
|
||||
strings.TrimRight(disk.Model, " "),
|
||||
strings.TrimRight(disk.Caption, " "),
|
||||
strings.TrimRight(disk.Name, "\\.\\"), //nolint:staticcheck
|
||||
)
|
||||
|
||||
for _, status := range allDiskStatus {
|
||||
isCurrentState := 0.0
|
||||
if status == disk.Status {
|
||||
isCurrentState = 1.0
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.status,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentState,
|
||||
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
|
||||
status,
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.size,
|
||||
prometheus.GaugeValue,
|
||||
float64(disk.Size),
|
||||
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.partitions,
|
||||
prometheus.GaugeValue,
|
||||
float64(disk.Partitions),
|
||||
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
|
||||
)
|
||||
|
||||
for availNum, val := range availMap {
|
||||
isCurrentState := 0.0
|
||||
if availNum == int(disk.Availability) {
|
||||
isCurrentState = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.availability,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentState,
|
||||
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
|
||||
val,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package diskdrive_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/diskdrive"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, diskdrive.Name, diskdrive.NewWithFlags)
}
|
||||
@@ -1,542 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "dns"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DNS_DNS metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
wmiClient *wmi.Client
|
||||
|
||||
dynamicUpdatesFailures *prometheus.Desc
|
||||
dynamicUpdatesQueued *prometheus.Desc
|
||||
dynamicUpdatesReceived *prometheus.Desc
|
||||
memoryUsedBytes *prometheus.Desc
|
||||
notifyReceived *prometheus.Desc
|
||||
notifySent *prometheus.Desc
|
||||
queries *prometheus.Desc
|
||||
recursiveQueries *prometheus.Desc
|
||||
recursiveQueryFailures *prometheus.Desc
|
||||
recursiveQuerySendTimeouts *prometheus.Desc
|
||||
responses *prometheus.Desc
|
||||
secureUpdateFailures *prometheus.Desc
|
||||
secureUpdateReceived *prometheus.Desc
|
||||
unmatchedResponsesReceived *prometheus.Desc
|
||||
winsQueries *prometheus.Desc
|
||||
winsResponses *prometheus.Desc
|
||||
zoneTransferFailures *prometheus.Desc
|
||||
zoneTransferRequestsReceived *prometheus.Desc
|
||||
zoneTransferRequestsSent *prometheus.Desc
|
||||
zoneTransferResponsesReceived *prometheus.Desc
|
||||
zoneTransferSuccessReceived *prometheus.Desc
|
||||
zoneTransferSuccessSent *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
|
||||
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
|
||||
return errors.New("wmiClient or SWbemServicesClient is nil")
|
||||
}
|
||||
|
||||
c.wmiClient = wmiClient
|
||||
|
||||
c.zoneTransferRequestsReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
|
||||
"Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
|
||||
[]string{"qtype"},
|
||||
nil,
|
||||
)
|
||||
c.zoneTransferRequestsSent = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_sent_total"),
|
||||
"Number of zone transfer requests (AXFR/IXFR) sent by the secondary DNS server",
|
||||
[]string{"qtype"},
|
||||
nil,
|
||||
)
|
||||
c.zoneTransferResponsesReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_response_received_total"),
|
||||
"Number of zone transfer responses (AXFR/IXFR) received by the secondary DNS server",
|
||||
[]string{"qtype"},
|
||||
nil,
|
||||
)
|
||||
c.zoneTransferSuccessReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_success_received_total"),
|
||||
"Number of successful zone transfers (AXFR/IXFR) received by the secondary DNS server",
|
||||
[]string{"qtype", "protocol"},
|
||||
nil,
|
||||
)
|
||||
c.zoneTransferSuccessSent = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_success_sent_total"),
|
||||
"Number of successful zone transfers (AXFR/IXFR) of the master DNS server",
|
||||
[]string{"qtype"},
|
||||
nil,
|
||||
)
|
||||
c.zoneTransferFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_failures_total"),
|
||||
"Number of failed zone transfers of the master DNS server",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.memoryUsedBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "memory_used_bytes"),
|
||||
"Current memory used by DNS server",
|
||||
[]string{"area"},
|
||||
nil,
|
||||
)
|
||||
c.dynamicUpdatesQueued = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "dynamic_updates_queued"),
|
||||
"Number of dynamic updates queued by the DNS server",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.dynamicUpdatesReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "dynamic_updates_received_total"),
|
||||
"Number of secure update requests received by the DNS server",
|
||||
[]string{"operation"},
|
||||
nil,
|
||||
)
|
||||
c.dynamicUpdatesFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "dynamic_updates_failures_total"),
|
||||
"Number of dynamic updates which timed out or were rejected by the DNS server",
|
||||
[]string{"reason"},
|
||||
nil,
|
||||
)
|
||||
c.notifyReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "notify_received_total"),
|
||||
"Number of notifies received by the secondary DNS server",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.notifySent = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "notify_sent_total"),
|
||||
"Number of notifies sent by the master DNS server",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.secureUpdateFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "secure_update_failures_total"),
|
||||
"Number of secure updates that failed on the DNS server",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.secureUpdateReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "secure_update_received_total"),
|
||||
"Number of secure update requests received by the DNS server",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.queries = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "queries_total"),
|
||||
"Number of queries received by DNS server",
|
||||
[]string{"protocol"},
|
||||
nil,
|
||||
)
|
||||
c.responses = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "responses_total"),
|
||||
"Number of responses sent by DNS server",
|
||||
[]string{"protocol"},
|
||||
nil,
|
||||
)
|
||||
c.recursiveQueries = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "recursive_queries_total"),
|
||||
"Number of recursive queries received by DNS server",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.recursiveQueryFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "recursive_query_failures_total"),
|
||||
"Number of recursive query failures",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.recursiveQuerySendTimeouts = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "recursive_query_send_timeouts_total"),
|
||||
"Number of recursive query sending timeouts",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.winsQueries = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "wins_queries_total"),
|
||||
"Number of WINS lookup requests received by the server",
|
||||
[]string{"direction"},
|
||||
nil,
|
||||
)
|
||||
c.winsResponses = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "wins_responses_total"),
|
||||
"Number of WINS lookup responses sent by the server",
|
||||
[]string{"direction"},
|
||||
nil,
|
||||
)
|
||||
c.unmatchedResponsesReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "unmatched_responses_total"),
|
||||
"Number of response packets received by the DNS server that do not match any outstanding remote query",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ch); err != nil {
|
||||
logger.Error("failed collecting dns metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_DNS_DNS docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/ms803992.aspx?f=255&MSPPError=-2147217396
|
||||
// - https://technet.microsoft.com/en-us/library/cc977686.aspx
|
||||
type Win32_PerfRawData_DNS_DNS struct {
|
||||
AXFRRequestReceived uint32
|
||||
AXFRRequestSent uint32
|
||||
AXFRResponseReceived uint32
|
||||
AXFRSuccessReceived uint32
|
||||
AXFRSuccessSent uint32
|
||||
CachingMemory uint32
|
||||
DatabaseNodeMemory uint32
|
||||
DynamicUpdateNoOperation uint32
|
||||
DynamicUpdateQueued uint32
|
||||
DynamicUpdateRejected uint32
|
||||
DynamicUpdateTimeOuts uint32
|
||||
DynamicUpdateWrittentoDatabase uint32
|
||||
IXFRRequestReceived uint32
|
||||
IXFRRequestSent uint32
|
||||
IXFRResponseReceived uint32
|
||||
IXFRSuccessSent uint32
|
||||
IXFRTCPSuccessReceived uint32
|
||||
IXFRUDPSuccessReceived uint32
|
||||
NbstatMemory uint32
|
||||
NotifyReceived uint32
|
||||
NotifySent uint32
|
||||
RecordFlowMemory uint32
|
||||
RecursiveQueries uint32
|
||||
RecursiveQueryFailure uint32
|
||||
RecursiveSendTimeOuts uint32
|
||||
SecureUpdateFailure uint32
|
||||
SecureUpdateReceived uint32
|
||||
TCPMessageMemory uint32
|
||||
TCPQueryReceived uint32
|
||||
TCPResponseSent uint32
|
||||
UDPMessageMemory uint32
|
||||
UDPQueryReceived uint32
|
||||
UDPResponseSent uint32
|
||||
UnmatchedResponsesReceived uint32
|
||||
WINSLookupReceived uint32
|
||||
WINSResponseSent uint32
|
||||
WINSReverseLookupReceived uint32
|
||||
WINSReverseResponseSent uint32
|
||||
ZoneTransferFailure uint32
|
||||
ZoneTransferSOARequestSent uint32
|
||||
}
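The collect function below fans the AXFR/IXFR fields of this struct out into the "qtype" label of the zone-transfer metrics. Purely as an orientation aid (this map does not exist in the code):

// Illustrative only: AXFR counters populate the "full" series,
// IXFR counters populate the "incremental" series.
var zoneTransferQType = map[string]string{
	"AXFR": "full",
	"IXFR": "incremental",
}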
|
||||
|
||||
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_DNS_DNS
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_DNS_DNS", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(dst) == 0 {
|
||||
return errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferRequestsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRRequestReceived),
|
||||
"full",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferRequestsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRRequestReceived),
|
||||
"incremental",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferRequestsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRRequestSent),
|
||||
"full",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferRequestsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRRequestSent),
|
||||
"incremental",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferRequestsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ZoneTransferSOARequestSent),
|
||||
"soa",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferResponsesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRResponseReceived),
|
||||
"full",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferResponsesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRResponseReceived),
|
||||
"incremental",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferSuccessReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRSuccessReceived),
|
||||
"full",
|
||||
"tcp",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferSuccessReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRTCPSuccessReceived),
|
||||
"incremental",
|
||||
"tcp",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferSuccessReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRUDPSuccessReceived),
|
||||
"incremental",
|
||||
"udp",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferSuccessSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRSuccessSent),
|
||||
"full",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferSuccessSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRSuccessSent),
|
||||
"incremental",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.zoneTransferFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ZoneTransferFailure),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.memoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CachingMemory),
|
||||
"caching",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.memoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].DatabaseNodeMemory),
|
||||
"database_node",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.memoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].NbstatMemory),
|
||||
"nbstat",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.memoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].RecordFlowMemory),
|
||||
"record_flow",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.memoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TCPMessageMemory),
|
||||
"tcp_message",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.memoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].UDPMessageMemory),
|
||||
"udp_message",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dynamicUpdatesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].DynamicUpdateNoOperation),
|
||||
"noop",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dynamicUpdatesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].DynamicUpdateWrittentoDatabase),
|
||||
"written",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dynamicUpdatesQueued,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].DynamicUpdateQueued),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dynamicUpdatesFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].DynamicUpdateRejected),
|
||||
"rejected",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.dynamicUpdatesFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].DynamicUpdateTimeOuts),
|
||||
"timeout",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.notifyReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].NotifyReceived),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.notifySent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].NotifySent),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.recursiveQueries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].RecursiveQueries),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.recursiveQueryFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].RecursiveQueryFailure),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.recursiveQuerySendTimeouts,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].RecursiveSendTimeOuts),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.queries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].TCPQueryReceived),
|
||||
"tcp",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.queries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].UDPQueryReceived),
|
||||
"udp",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.responses,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].TCPResponseSent),
|
||||
"tcp",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.responses,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].UDPResponseSent),
|
||||
"udp",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.unmatchedResponsesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].UnmatchedResponsesReceived),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.winsQueries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].WINSLookupReceived),
|
||||
"forward",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.winsQueries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].WINSReverseLookupReceived),
|
||||
"reverse",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.winsResponses,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].WINSResponseSent),
|
||||
"forward",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.winsResponses,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].WINSReverseResponseSent),
|
||||
"reverse",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.secureUpdateFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SecureUpdateFailure),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.secureUpdateReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SecureUpdateReceived),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package dns_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/dns"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, dns.Name, dns.NewWithFlags)
}
|
||||
@@ -1,745 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package exchange
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "exchange"
|
||||
|
||||
type Config struct {
|
||||
CollectorsEnabled []string `yaml:"collectors_enabled"`
|
||||
}
|
||||
|
||||
var ConfigDefaults = Config{
|
||||
CollectorsEnabled: []string{
|
||||
"ADAccessProcesses",
|
||||
"TransportQueues",
|
||||
"HttpProxy",
|
||||
"ActiveSync",
|
||||
"AvailabilityService",
|
||||
"OutlookWebAccess",
|
||||
"Autodiscover",
|
||||
"WorkloadManagement",
|
||||
"RpcClientAccess",
|
||||
"MapiHttpEmsmdb",
|
||||
},
|
||||
}
|
||||
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
activeMailboxDeliveryQueueLength *prometheus.Desc
|
||||
activeSyncRequestsPerSec *prometheus.Desc
|
||||
activeTasks *prometheus.Desc
|
||||
activeUserCount *prometheus.Desc
|
||||
activeUserCountMapiHttpEmsMDB *prometheus.Desc
|
||||
autoDiscoverRequestsPerSec *prometheus.Desc
|
||||
availabilityRequestsSec *prometheus.Desc
|
||||
averageAuthenticationLatency *prometheus.Desc
|
||||
averageCASProcessingLatency *prometheus.Desc
|
||||
completedTasks *prometheus.Desc
|
||||
connectionCount *prometheus.Desc
|
||||
currentUniqueUsers *prometheus.Desc
|
||||
externalActiveRemoteDeliveryQueueLength *prometheus.Desc
|
||||
externalLargestDeliveryQueueLength *prometheus.Desc
|
||||
internalActiveRemoteDeliveryQueueLength *prometheus.Desc
|
||||
internalLargestDeliveryQueueLength *prometheus.Desc
|
||||
isActive *prometheus.Desc
|
||||
ldapReadTime *prometheus.Desc
|
||||
ldapSearchTime *prometheus.Desc
|
||||
ldapTimeoutErrorsPerSec *prometheus.Desc
|
||||
ldapWriteTime *prometheus.Desc
|
||||
longRunningLDAPOperationsPerMin *prometheus.Desc
|
||||
mailboxServerLocatorAverageLatency *prometheus.Desc
|
||||
mailboxServerProxyFailureRate *prometheus.Desc
|
||||
outstandingProxyRequests *prometheus.Desc
|
||||
owaRequestsPerSec *prometheus.Desc
|
||||
pingCommandsPending *prometheus.Desc
|
||||
poisonQueueLength *prometheus.Desc
|
||||
proxyRequestsPerSec *prometheus.Desc
|
||||
queuedTasks *prometheus.Desc
|
||||
retryMailboxDeliveryQueueLength *prometheus.Desc
|
||||
rpcAveragedLatency *prometheus.Desc
|
||||
rpcOperationsPerSec *prometheus.Desc
|
||||
rpcRequests *prometheus.Desc
|
||||
syncCommandsPerSec *prometheus.Desc
|
||||
unreachableQueueLength *prometheus.Desc
|
||||
userCount *prometheus.Desc
|
||||
yieldedTasks *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
if config.CollectorsEnabled == nil {
|
||||
config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(app *kingpin.Application) *Collector {
|
||||
c := &Collector{
|
||||
config: ConfigDefaults,
|
||||
}
|
||||
c.config.CollectorsEnabled = make([]string, 0)
|
||||
|
||||
var listAllCollectors bool
|
||||
|
||||
var collectorsEnabled string
|
||||
|
||||
app.Flag(
|
||||
"collector.exchange.list",
|
||||
"List the collectors along with their perflib object name/ids",
|
||||
).BoolVar(&listAllCollectors)
|
||||
|
||||
app.Flag(
|
||||
"collector.exchange.enabled",
|
||||
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
|
||||
).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
|
||||
|
||||
app.PreAction(func(*kingpin.ParseContext) error {
|
||||
if listAllCollectors {
|
||||
collectorDesc := map[string]string{
|
||||
"ADAccessProcesses": "[19108] MSExchange ADAccess Processes",
|
||||
"TransportQueues": "[20524] MSExchangeTransport Queues",
|
||||
"HttpProxy": "[36934] MSExchange HttpProxy",
|
||||
"ActiveSync": "[25138] MSExchange ActiveSync",
|
||||
"AvailabilityService": "[24914] MSExchange Availability Service",
|
||||
"OutlookWebAccess": "[24618] MSExchange OWA",
|
||||
"Autodiscover": "[29240] MSExchange Autodiscover",
|
||||
"WorkloadManagement": "[19430] MSExchange WorkloadManagement Workloads",
|
||||
"RpcClientAccess": "[29336] MSExchange RpcClientAccess",
|
||||
"MapiHttpEmsmdb": "[26463] MSExchange MapiHttp Emsmdb",
|
||||
}
|
||||
|
||||
sb := strings.Builder{}
|
||||
sb.WriteString(fmt.Sprintf("%-32s %-32s\n", "Collector Name", "[PerfID] Perflib Object"))
|
||||
|
||||
for _, cname := range ConfigDefaults.CollectorsEnabled {
|
||||
sb.WriteString(fmt.Sprintf("%-32s %-32s\n", cname, collectorDesc[cname]))
|
||||
}
|
||||
|
||||
app.UsageTemplate(sb.String()).Usage(nil)
|
||||
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
app.Action(func(*kingpin.ParseContext) error {
|
||||
c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return c
|
||||
}
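The Action callback above simply splits the --collector.exchange.enabled flag on commas into Config.CollectorsEnabled. A hedged usage sketch from the caller's side (the values and surrounding context are illustrative only):

// Equivalent to passing --collector.exchange.enabled="TransportQueues,HttpProxy".
cfg := exchange.Config{
	CollectorsEnabled: strings.Split("TransportQueues,HttpProxy", ","),
}
c := exchange.New(&cfg) // non-nil slice, so the defaults are not re-applied
_ = c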
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{
|
||||
"MSExchange ADAccess Processes",
|
||||
"MSExchangeTransport Queues",
|
||||
"MSExchange HttpProxy",
|
||||
"MSExchange ActiveSync",
|
||||
"MSExchange Availability Service",
|
||||
"MSExchange OWA",
|
||||
"MSExchangeAutodiscover",
|
||||
"MSExchange WorkloadManagement Workloads",
|
||||
"MSExchange RpcClientAccess",
|
||||
"MSExchange MapiHttp Emsmdb",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
// desc creates a new prometheus description
|
||||
desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
|
||||
return prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, "exchange", metricName),
|
||||
description,
|
||||
labels,
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
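// Illustrative note (not part of the original change): desc("rpc_avg_latency_sec", ...)
// yields the fully-qualified metric name "windows_exchange_rpc_avg_latency_sec",
// assuming types.Namespace is the exporter-wide "windows" prefix.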
c.rpcAveragedLatency = desc("rpc_avg_latency_sec", "The latency (sec) averaged for the past 1024 packets")
|
||||
c.rpcRequests = desc("rpc_requests", "Number of client requests currently being processed by the RPC Client Access service")
|
||||
c.activeUserCount = desc("rpc_active_user_count", "Number of unique users that have shown some kind of activity in the last 2 minutes")
|
||||
c.connectionCount = desc("rpc_connection_count", "Total number of client connections maintained")
|
||||
c.rpcOperationsPerSec = desc("rpc_operations_total", "The rate at which RPC operations occur")
|
||||
c.userCount = desc("rpc_user_count", "Number of users")
|
||||
c.ldapReadTime = desc("ldap_read_time_sec", "Time (sec) to send an LDAP read request and receive a response", "name")
|
||||
c.ldapSearchTime = desc("ldap_search_time_sec", "Time (sec) to send an LDAP search request and receive a response", "name")
|
||||
c.ldapWriteTime = desc("ldap_write_time_sec", "Time (sec) to send an LDAP Add/Modify/Delete request and receive a response", "name")
|
||||
c.ldapTimeoutErrorsPerSec = desc("ldap_timeout_errors_total", "Total number of LDAP timeout errors", "name")
|
||||
c.longRunningLDAPOperationsPerMin = desc("ldap_long_running_ops_per_sec", "Long Running LDAP operations per second", "name")
|
||||
c.externalActiveRemoteDeliveryQueueLength = desc("transport_queues_external_active_remote_delivery", "External Active Remote Delivery Queue length", "name")
|
||||
c.internalActiveRemoteDeliveryQueueLength = desc("transport_queues_internal_active_remote_delivery", "Internal Active Remote Delivery Queue length", "name")
|
||||
c.activeMailboxDeliveryQueueLength = desc("transport_queues_active_mailbox_delivery", "Active Mailbox Delivery Queue length", "name")
|
||||
c.retryMailboxDeliveryQueueLength = desc("transport_queues_retry_mailbox_delivery", "Retry Mailbox Delivery Queue length", "name")
|
||||
c.unreachableQueueLength = desc("transport_queues_unreachable", "Unreachable Queue length", "name")
|
||||
c.externalLargestDeliveryQueueLength = desc("transport_queues_external_largest_delivery", "External Largest Delivery Queue length", "name")
|
||||
c.internalLargestDeliveryQueueLength = desc("transport_queues_internal_largest_delivery", "Internal Largest Delivery Queue length", "name")
|
||||
c.poisonQueueLength = desc("transport_queues_poison", "Poison Queue length", "name")
|
||||
c.mailboxServerLocatorAverageLatency = desc("http_proxy_mailbox_server_locator_avg_latency_sec", "Average latency (sec) of MailboxServerLocator web service calls", "name")
|
||||
c.averageAuthenticationLatency = desc("http_proxy_avg_auth_latency", "Average time spent authenticating CAS requests over the last 200 samples", "name")
|
||||
c.outstandingProxyRequests = desc("http_proxy_outstanding_proxy_requests", "Number of concurrent outstanding proxy requests", "name")
|
||||
c.proxyRequestsPerSec = desc("http_proxy_requests_total", "Number of proxy requests processed each second", "name")
|
||||
c.availabilityRequestsSec = desc("avail_service_requests_per_sec", "Number of requests serviced per second")
|
||||
c.currentUniqueUsers = desc("owa_current_unique_users", "Number of unique users currently logged on to Outlook Web App")
|
||||
c.owaRequestsPerSec = desc("owa_requests_total", "Number of requests handled by Outlook Web App per second")
|
||||
c.autoDiscoverRequestsPerSec = desc("autodiscover_requests_total", "Number of autodiscover service requests processed each second")
|
||||
c.activeTasks = desc("workload_active_tasks", "Number of active tasks currently running in the background for workload management", "name")
|
||||
c.completedTasks = desc("workload_completed_tasks", "Number of workload management tasks that have been completed", "name")
|
||||
c.queuedTasks = desc("workload_queued_tasks", "Number of workload management tasks that are currently queued up waiting to be processed", "name")
|
||||
c.yieldedTasks = desc("workload_yielded_tasks", "The total number of tasks that have been yielded by a workload", "name")
|
||||
c.isActive = desc("workload_is_active", "Active indicates whether the workload is in an active (1) or paused (0) state", "name")
|
||||
c.activeSyncRequestsPerSec = desc("activesync_requests_total", "Number of HTTP requests received from the client via ASP.NET per second. Shows the current user load")
|
||||
c.averageCASProcessingLatency = desc("http_proxy_avg_cas_processing_latency_sec", "Average latency (sec) of CAS processing time over the last 200 reqs", "name")
|
||||
c.mailboxServerProxyFailureRate = desc("http_proxy_mailbox_proxy_failure_rate", "% of failures between this CAS and MBX servers over the last 200 samples", "name")
|
||||
c.pingCommandsPending = desc("activesync_ping_cmds_pending", "Number of ping commands currently pending in the queue")
|
||||
c.syncCommandsPerSec = desc("activesync_sync_cmds_total", "Number of sync commands processed per second. Clients use this command to synchronize items within a folder")
|
||||
c.activeUserCountMapiHttpEmsMDB = desc("mapihttp_emsmdb_active_user_count", "Number of unique outlook users that have shown some kind of activity in the last 2 minutes")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect collects exchange metrics and sends them to prometheus.
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
collectorFuncs := map[string]func(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error{
|
||||
"ADAccessProcesses": c.collectADAccessProcesses,
|
||||
"TransportQueues": c.collectTransportQueues,
|
||||
"HttpProxy": c.collectHTTPProxy,
|
||||
"ActiveSync": c.collectActiveSync,
|
||||
"AvailabilityService": c.collectAvailabilityService,
|
||||
"OutlookWebAccess": c.collectOWA,
|
||||
"Autodiscover": c.collectAutoDiscover,
|
||||
"WorkloadManagement": c.collectWorkloadManagementWorkloads,
|
||||
"RpcClientAccess": c.collectRPC,
|
||||
"MapiHttpEmsmdb": c.collectMapiHttpEmsmdb,
|
||||
}
|
||||
|
||||
for _, collectorName := range c.config.CollectorsEnabled {
|
||||
if err := collectorFuncs[collectorName](ctx, logger, ch); err != nil {
|
||||
logger.Error("Error in "+collectorName,
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [19108] MSExchange ADAccess Processes.
|
||||
type perflibADAccessProcesses struct {
|
||||
Name string
|
||||
|
||||
LDAPReadTime float64 `perflib:"LDAP Read Time"`
|
||||
LDAPSearchTime float64 `perflib:"LDAP Search Time"`
|
||||
LDAPWriteTime float64 `perflib:"LDAP Write Time"`
|
||||
LDAPTimeoutErrorsPerSec float64 `perflib:"LDAP Timeout Errors/sec"`
|
||||
LongRunningLDAPOperationsPerMin float64 `perflib:"Long Running LDAP Operations/min"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectADAccessProcesses(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibADAccessProcesses
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange ADAccess Processes"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
labelUseCount := make(map[string]int)
|
||||
|
||||
for _, proc := range data {
|
||||
labelName := c.toLabelName(proc.Name)
|
||||
if strings.HasSuffix(labelName, "_total") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Since we're not including the PID suffix from the instance names in the label names, we get an occasional duplicate.
|
||||
// This seems to affect only about 4 instances of this object.
|
||||
labelUseCount[labelName]++
|
||||
if labelUseCount[labelName] > 1 {
|
||||
labelName = fmt.Sprintf("%s_%d", labelName, labelUseCount[labelName])
|
||||
}
|
||||
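// Illustrative note (not part of the original change): if two processes both map to
// the label value "msexchangerepl", the second occurrence is exported as
// "msexchangerepl_2"; the instance name here is a hypothetical example.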
ch <- prometheus.MustNewConstMetric(
|
||||
c.ldapReadTime,
|
||||
prometheus.CounterValue,
|
||||
c.msToSec(proc.LDAPReadTime),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ldapSearchTime,
|
||||
prometheus.CounterValue,
|
||||
c.msToSec(proc.LDAPSearchTime),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ldapWriteTime,
|
||||
prometheus.CounterValue,
|
||||
c.msToSec(proc.LDAPWriteTime),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ldapTimeoutErrorsPerSec,
|
||||
prometheus.CounterValue,
|
||||
proc.LDAPTimeoutErrorsPerSec,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.longRunningLDAPOperationsPerMin,
|
||||
prometheus.CounterValue,
|
||||
proc.LongRunningLDAPOperationsPerMin*60,
|
||||
labelName,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [24914] MSExchange Availability Service.
|
||||
type perflibAvailabilityService struct {
|
||||
RequestsSec float64 `perflib:"Availability Requests (sec)"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectAvailabilityService(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibAvailabilityService
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange Availability Service"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, availservice := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.availabilityRequestsSec,
|
||||
prometheus.CounterValue,
|
||||
availservice.RequestsSec,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [36934] MSExchange HttpProxy.
|
||||
type perflibHTTPProxy struct {
|
||||
Name string
|
||||
|
||||
MailboxServerLocatorAverageLatency float64 `perflib:"MailboxServerLocator Average Latency (Moving Average)"`
|
||||
AverageAuthenticationLatency float64 `perflib:"Average Authentication Latency"`
|
||||
AverageCASProcessingLatency float64 `perflib:"Average ClientAccess Server Processing Latency"`
|
||||
MailboxServerProxyFailureRate float64 `perflib:"Mailbox Server Proxy Failure Rate"`
|
||||
OutstandingProxyRequests float64 `perflib:"Outstanding Proxy Requests"`
|
||||
ProxyRequestsPerSec float64 `perflib:"Proxy Requests/Sec"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectHTTPProxy(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibHTTPProxy
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange HttpProxy"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, instance := range data {
|
||||
labelName := c.toLabelName(instance.Name)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.mailboxServerLocatorAverageLatency,
|
||||
prometheus.GaugeValue,
|
||||
c.msToSec(instance.MailboxServerLocatorAverageLatency),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.averageAuthenticationLatency,
|
||||
prometheus.GaugeValue,
|
||||
instance.AverageAuthenticationLatency,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.averageCASProcessingLatency,
|
||||
prometheus.GaugeValue,
|
||||
c.msToSec(instance.AverageCASProcessingLatency),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.mailboxServerProxyFailureRate,
|
||||
prometheus.GaugeValue,
|
||||
instance.MailboxServerProxyFailureRate,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.outstandingProxyRequests,
|
||||
prometheus.GaugeValue,
|
||||
instance.OutstandingProxyRequests,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.proxyRequestsPerSec,
|
||||
prometheus.CounterValue,
|
||||
instance.ProxyRequestsPerSec,
|
||||
labelName,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [24618] MSExchange OWA.
|
||||
type perflibOWA struct {
|
||||
CurrentUniqueUsers float64 `perflib:"Current Unique Users"`
|
||||
RequestsPerSec float64 `perflib:"Requests/sec"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectOWA(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibOWA
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange OWA"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, owa := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentUniqueUsers,
|
||||
prometheus.GaugeValue,
|
||||
owa.CurrentUniqueUsers,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.owaRequestsPerSec,
|
||||
prometheus.CounterValue,
|
||||
owa.RequestsPerSec,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [25138] MSExchange ActiveSync.
|
||||
type perflibActiveSync struct {
|
||||
RequestsPerSec float64 `perflib:"Requests/sec"`
|
||||
PingCommandsPending float64 `perflib:"Ping Commands Pending"`
|
||||
SyncCommandsPerSec float64 `perflib:"Sync Commands/sec"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectActiveSync(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibActiveSync
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange ActiveSync"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, instance := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.activeSyncRequestsPerSec,
|
||||
prometheus.CounterValue,
|
||||
instance.RequestsPerSec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.pingCommandsPending,
|
||||
prometheus.GaugeValue,
|
||||
instance.PingCommandsPending,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.syncCommandsPerSec,
|
||||
prometheus.CounterValue,
|
||||
instance.SyncCommandsPerSec,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [29366] MSExchange RpcClientAccess.
|
||||
type perflibRPCClientAccess struct {
|
||||
RPCAveragedLatency float64 `perflib:"RPC Averaged Latency"`
|
||||
RPCRequests float64 `perflib:"RPC Requests"`
|
||||
ActiveUserCount float64 `perflib:"Active User Count"`
|
||||
ConnectionCount float64 `perflib:"Connection Count"`
|
||||
RPCOperationsPerSec float64 `perflib:"RPC Operations/sec"`
|
||||
UserCount float64 `perflib:"User Count"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectRPC(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibRPCClientAccess
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange RpcClientAccess"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, rpc := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.rpcAveragedLatency,
|
||||
prometheus.GaugeValue,
|
||||
c.msToSec(rpc.RPCAveragedLatency),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.rpcRequests,
|
||||
prometheus.GaugeValue,
|
||||
rpc.RPCRequests,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.activeUserCount,
|
||||
prometheus.GaugeValue,
|
||||
rpc.ActiveUserCount,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.connectionCount,
|
||||
prometheus.GaugeValue,
|
||||
rpc.ConnectionCount,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.rpcOperationsPerSec,
|
||||
prometheus.CounterValue,
|
||||
rpc.RPCOperationsPerSec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.userCount,
|
||||
prometheus.GaugeValue,
|
||||
rpc.UserCount,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [20524] MSExchangeTransport Queues.
|
||||
type perflibTransportQueues struct {
|
||||
Name string
|
||||
|
||||
ExternalActiveRemoteDeliveryQueueLength float64 `perflib:"External Active Remote Delivery Queue Length"`
|
||||
InternalActiveRemoteDeliveryQueueLength float64 `perflib:"Internal Active Remote Delivery Queue Length"`
|
||||
ActiveMailboxDeliveryQueueLength float64 `perflib:"Active Mailbox Delivery Queue Length"`
|
||||
RetryMailboxDeliveryQueueLength float64 `perflib:"Retry Mailbox Delivery Queue Length"`
|
||||
UnreachableQueueLength float64 `perflib:"Unreachable Queue Length"`
|
||||
ExternalLargestDeliveryQueueLength float64 `perflib:"External Largest Delivery Queue Length"`
|
||||
InternalLargestDeliveryQueueLength float64 `perflib:"Internal Largest Delivery Queue Length"`
|
||||
PoisonQueueLength float64 `perflib:"Poison Queue Length"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectTransportQueues(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibTransportQueues
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchangeTransport Queues"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, queue := range data {
|
||||
labelName := c.toLabelName(queue.Name)
|
||||
if strings.HasSuffix(labelName, "_total") {
|
||||
continue
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.externalActiveRemoteDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.ExternalActiveRemoteDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.internalActiveRemoteDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.InternalActiveRemoteDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.activeMailboxDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.ActiveMailboxDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.retryMailboxDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.RetryMailboxDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.unreachableQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.UnreachableQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.externalLargestDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.ExternalLargestDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.internalLargestDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.InternalLargestDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.poisonQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.PoisonQueueLength,
|
||||
labelName,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [19430] MSExchange WorkloadManagement Workloads.
|
||||
type perflibWorkloadManagementWorkloads struct {
|
||||
Name string
|
||||
|
||||
ActiveTasks float64 `perflib:"ActiveTasks"`
|
||||
CompletedTasks float64 `perflib:"CompletedTasks"`
|
||||
QueuedTasks float64 `perflib:"QueuedTasks"`
|
||||
YieldedTasks float64 `perflib:"YieldedTasks"`
|
||||
IsActive float64 `perflib:"Active"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectWorkloadManagementWorkloads(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibWorkloadManagementWorkloads
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange WorkloadManagement Workloads"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, instance := range data {
|
||||
labelName := c.toLabelName(instance.Name)
|
||||
if strings.HasSuffix(labelName, "_total") {
|
||||
continue
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.activeTasks,
|
||||
prometheus.GaugeValue,
|
||||
instance.ActiveTasks,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.completedTasks,
|
||||
prometheus.CounterValue,
|
||||
instance.CompletedTasks,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.queuedTasks,
|
||||
prometheus.CounterValue,
|
||||
instance.QueuedTasks,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.yieldedTasks,
|
||||
prometheus.CounterValue,
|
||||
instance.YieldedTasks,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.isActive,
|
||||
prometheus.GaugeValue,
|
||||
instance.IsActive,
|
||||
labelName,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [29240] MSExchangeAutodiscover.
|
||||
type perflibAutodiscover struct {
|
||||
RequestsPerSec float64 `perflib:"Requests/sec"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectAutoDiscover(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibAutodiscover
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchangeAutodiscover"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, autodisc := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.autoDiscoverRequestsPerSec,
|
||||
prometheus.CounterValue,
|
||||
autodisc.RequestsPerSec,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [26463] MSExchange MapiHttp Emsmdb.
|
||||
type perflibMapiHttpEmsmdb struct {
|
||||
ActiveUserCount float64 `perflib:"Active User Count"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectMapiHttpEmsmdb(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var data []perflibMapiHttpEmsmdb
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["MSExchange MapiHttp Emsmdb"], &data, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, mapihttp := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.activeUserCountMapiHttpEmsMDB,
|
||||
prometheus.GaugeValue,
|
||||
mapihttp.ActiveUserCount,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// toLabelName converts a string to lowercase, replaces all whitespace and dots with underscores, and collapses consecutive underscores.
|
||||
func (c *Collector) toLabelName(name string) string {
|
||||
s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")
|
||||
s = strings.ReplaceAll(s, "__", "_")
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// msToSec converts from ms to seconds.
|
||||
func (c *Collector) msToSec(t float64) float64 {
|
||||
return t / 1000
|
||||
}
|
||||
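// Illustrative sketch (not part of the original change): how the two helpers above
// transform values, based only on their implementations. The instance name is a
// hypothetical example.
//
//	c := &Collector{}
//	c.toLabelName("MSExchange OWA.Requests") // "msexchange_owa_requests"
//	c.msToSec(1500)                          // 1.5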
@@ -1,12 +0,0 @@
|
||||
package exchange_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/exchange"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, exchange.Name, exchange.NewWithFlags)
|
||||
}
|
||||
@@ -1,175 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package filetime
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/bmatcuk/doublestar/v4"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "filetime"
|
||||
|
||||
type Config struct {
|
||||
filePatterns []string
|
||||
}
|
||||
|
||||
var ConfigDefaults = Config{
|
||||
filePatterns: []string{},
|
||||
}
|
||||
|
||||
// A Collector is a Prometheus Collector for collecting file times.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
fileMTime *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
if config.filePatterns == nil {
|
||||
config.filePatterns = ConfigDefaults.filePatterns
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(app *kingpin.Application) *Collector {
|
||||
c := &Collector{
|
||||
config: ConfigDefaults,
|
||||
}
|
||||
c.config.filePatterns = make([]string, 0)
|
||||
|
||||
var filePatterns string
|
||||
|
||||
app.Flag(
|
||||
"collector.filetime.file-patterns",
|
||||
"Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive). See https://github.com/bmatcuk/doublestar#patterns",
|
||||
).Default(strings.Join(ConfigDefaults.filePatterns, ",")).StringVar(&filePatterns)
|
||||
|
||||
app.Action(func(*kingpin.ParseContext) error {
|
||||
// doublestar.Glob() requires forward slashes
|
||||
c.config.filePatterns = strings.Split(filepath.ToSlash(filePatterns), ",")
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
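// Illustrative sketch (not part of the original change): how a configured pattern is
// normalized and matched, mirroring the flag handling above and the glob logic below.
// The path C:\logs\**\*.log is a hypothetical example.
//
//	pattern := filepath.ToSlash(`C:\logs\**\*.log`) // "C:/logs/**/*.log"
//	base, glob := doublestar.SplitPattern(pattern)  // "C:/logs", "**/*.log"
//	matches, _ := doublestar.Glob(os.DirFS(base), glob, doublestar.WithFilesOnly())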
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
|
||||
logger.Info("filetime collector is in an experimental state! It may subject to change.",
|
||||
slog.String("collector", Name),
|
||||
)
|
||||
|
||||
c.fileMTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "mtime_timestamp_seconds"),
|
||||
"File modification time",
|
||||
[]string{"file"},
|
||||
nil,
|
||||
)
|
||||
|
||||
for _, filePattern := range c.config.filePatterns {
|
||||
basePath, pattern := doublestar.SplitPattern(filePattern)
|
||||
|
||||
_, err := doublestar.Glob(os.DirFS(basePath), pattern, doublestar.WithFilesOnly())
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid glob pattern: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
return c.collectGlob(logger, ch)
|
||||
}
|
||||
|
||||
// collectGlob collects the modification time for every configured file pattern using doublestar glob matching.
|
||||
func (c *Collector) collectGlob(logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
for _, filePattern := range c.config.filePatterns {
|
||||
wg.Add(1)
|
||||
|
||||
go func(filePattern string) {
|
||||
defer wg.Done()
|
||||
|
||||
if err := c.collectGlobFilePath(logger, ch, filePattern); err != nil {
|
||||
logger.Error("failed collecting metrics for filepath",
|
||||
slog.String("filepath", filePattern),
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
}(filePattern)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collectGlobFilePath(logger *slog.Logger, ch chan<- prometheus.Metric, filePattern string) error {
|
||||
basePath, pattern := doublestar.SplitPattern(filePattern)
|
||||
basePathFS := os.DirFS(basePath)
|
||||
|
||||
matches, err := doublestar.Glob(basePathFS, pattern, doublestar.WithFilesOnly())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to glob: %w", err)
|
||||
}
|
||||
|
||||
for _, match := range matches {
|
||||
filePath := filepath.Join(basePath, match)
|
||||
|
||||
fileInfo, err := os.Stat(filePath)
|
||||
if err != nil {
|
||||
logger.Warn("failed to state file",
|
||||
slog.String("file", filePath),
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.fileMTime,
|
||||
prometheus.GaugeValue,
|
||||
float64(fileInfo.ModTime().UTC().Unix()),
|
||||
filePath,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package filetime_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/filetime"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, filetime.Name, filetime.NewWithFlags)
|
||||
}
|
||||
@@ -1,235 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package fsrmquota
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/utils"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "fsrmquota"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
type Collector struct {
|
||||
config Config
|
||||
wmiClient *wmi.Client
|
||||
|
||||
quotasCount *prometheus.Desc
|
||||
peakUsage *prometheus.Desc
|
||||
size *prometheus.Desc
|
||||
usage *prometheus.Desc
|
||||
|
||||
description *prometheus.Desc
|
||||
disabled *prometheus.Desc
|
||||
matchesTemplate *prometheus.Desc
|
||||
softLimit *prometheus.Desc
|
||||
template *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
|
||||
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
|
||||
return errors.New("wmiClient or SWbemServicesClient is nil")
|
||||
}
|
||||
|
||||
c.wmiClient = wmiClient
|
||||
|
||||
c.quotasCount = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "count"),
|
||||
"Number of Quotas",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.peakUsage = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "peak_usage_bytes"),
|
||||
"The highest amount of disk space usage charged to this quota. (PeakUsage)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
)
|
||||
c.size = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "size_bytes"),
|
||||
"The size of the quota. (Size)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
)
|
||||
c.usage = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "usage_bytes"),
|
||||
"The current amount of disk space usage charged to this quota. (Usage)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
)
|
||||
c.description = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "description"),
|
||||
"Description of the quota (Description)",
|
||||
[]string{"path", "template", "description"},
|
||||
nil,
|
||||
)
|
||||
c.disabled = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "disabled"),
|
||||
"If 1, the quota is disabled. The default value is 0. (Disabled)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
)
|
||||
c.softLimit = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "softlimit"),
|
||||
"If 1, the quota is a soft limit. If 0, the quota is a hard limit. The default value is 0. Optional (SoftLimit)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
)
|
||||
c.template = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "template"),
|
||||
"Quota template name. (Template)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
)
|
||||
c.matchesTemplate = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "matchestemplate"),
|
||||
"If 1, the property values of this quota match those values of the template from which it was derived. (MatchesTemplate)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ch); err != nil {
|
||||
logger.Error("failed collecting fsrmquota metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MSFT_FSRMQuota docs:
|
||||
// https://docs.microsoft.com/en-us/previous-versions/windows/desktop/fsrm/msft-fsrmquota
|
||||
type MSFT_FSRMQuota struct {
|
||||
Name string
|
||||
|
||||
Path string
|
||||
PeakUsage uint64
|
||||
Size uint64
|
||||
Usage uint64
|
||||
Description string
|
||||
Template string
|
||||
// Threshold string
|
||||
Disabled bool
|
||||
MatchesTemplate bool
|
||||
SoftLimit bool
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
|
||||
var dst []MSFT_FSRMQuota
|
||||
|
||||
var count int
|
||||
|
||||
if err := c.wmiClient.Query("SELECT * FROM MSFT_FSRMQuota", &dst, nil, "root/microsoft/windows/fsrm"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, quota := range dst {
|
||||
count++
|
||||
path := quota.Path
|
||||
template := quota.Template
|
||||
Description := quota.Description
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.peakUsage,
|
||||
prometheus.GaugeValue,
|
||||
float64(quota.PeakUsage),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.size,
|
||||
prometheus.GaugeValue,
|
||||
float64(quota.Size),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.usage,
|
||||
prometheus.GaugeValue,
|
||||
float64(quota.Usage),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.description,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
path, template, Description,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.disabled,
|
||||
prometheus.GaugeValue,
|
||||
utils.BoolToFloat(quota.Disabled),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.matchesTemplate,
|
||||
prometheus.GaugeValue,
|
||||
utils.BoolToFloat(quota.MatchesTemplate),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.softLimit,
|
||||
prometheus.GaugeValue,
|
||||
utils.BoolToFloat(quota.SoftLimit),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.quotasCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
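// Illustrative note (not part of the original change): the Description field is exported
// as a constant-1 info-style series so the free-text description becomes a label, e.g.
//
//	windows_fsrmquota_description{path="D:\\Shares\\Team",template="200 MB Limit",description="..."} 1
//
// The path and template values are hypothetical examples.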
@@ -1,12 +0,0 @@
|
||||
package fsrmquota_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/fsrmquota"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, fsrmquota.Name, fsrmquota.NewWithFlags)
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,12 +0,0 @@
|
||||
package hyperv_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, hyperv.Name, hyperv.NewWithFlags)
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,12 +0,0 @@
|
||||
package iis_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/iis"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, iis.Name, iis.NewWithFlags)
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package iis
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIISDeduplication(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
start := []perflibAPP_POOL_WAS{
|
||||
{
|
||||
Name: "foo",
|
||||
Frequency_Object: 1,
|
||||
},
|
||||
{
|
||||
Name: "foo1#999",
|
||||
Frequency_Object: 2,
|
||||
},
|
||||
{
|
||||
Name: "foo#2",
|
||||
Frequency_Object: 3,
|
||||
},
|
||||
{
|
||||
Name: "bar$2",
|
||||
Frequency_Object: 4,
|
||||
},
|
||||
{
|
||||
Name: "bar_2",
|
||||
Frequency_Object: 5,
|
||||
},
|
||||
}
|
||||
expected := make(map[string]perflibAPP_POOL_WAS)
|
||||
// Should be deduplicated from "foo#2"
|
||||
expected["foo"] = perflibAPP_POOL_WAS{Name: "foo#2", Frequency_Object: 3}
|
||||
// Map key should have suffix stripped, but struct name field should be unchanged
|
||||
expected["foo1"] = perflibAPP_POOL_WAS{Name: "foo1#999", Frequency_Object: 2}
|
||||
// Map key and Name should be identical, as there is no suffix starting with "#"
|
||||
expected["bar$2"] = perflibAPP_POOL_WAS{Name: "bar$2", Frequency_Object: 4}
|
||||
// Map key and Name should be identical, as there is no suffix starting with "#"
|
||||
expected["bar_2"] = perflibAPP_POOL_WAS{Name: "bar_2", Frequency_Object: 5}
|
||||
|
||||
deduplicated := dedupIISNames(start)
|
||||
if !reflect.DeepEqual(expected, deduplicated) {
|
||||
t.Errorf("Flattened values do not match!\nExpected result: %+v\nActual result: %+v", expected, deduplicated)
|
||||
}
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package license
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/headers/slc"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "license"
|
||||
|
||||
var labelMap = map[slc.SL_GENUINE_STATE]string{
|
||||
slc.SL_GEN_STATE_IS_GENUINE: "genuine",
|
||||
slc.SL_GEN_STATE_INVALID_LICENSE: "invalid_license",
|
||||
slc.SL_GEN_STATE_TAMPERED: "tampered",
|
||||
slc.SL_GEN_STATE_OFFLINE: "offline",
|
||||
slc.SL_GEN_STATE_LAST: "last",
|
||||
}
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for Windows software licensing (genuine state) metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
licenseStatus *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
c.licenseStatus = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "status"),
|
||||
"Status of windows license",
|
||||
[]string{"state"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ch); err != nil {
|
||||
logger.Error("failed collecting license metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
|
||||
status, err := slc.SLIsWindowsGenuineLocal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, v := range labelMap {
|
||||
val := 0.0
|
||||
if status == k {
|
||||
val = 1.0
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(c.licenseStatus, prometheus.GaugeValue, val, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
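// Illustrative note (not part of the original change): on a genuine system the loop above
// emits one series per state in labelMap, e.g.
//
//	windows_license_status{state="genuine"} 1
//	windows_license_status{state="tampered"} 0
//
// assuming types.Namespace resolves to the exporter-wide "windows" prefix.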
@@ -1,12 +0,0 @@
|
||||
package license_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, license.Name, license.NewWithFlags)
|
||||
}
|
||||
@@ -1,565 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package logical_disk
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
const Name = "logical_disk"
|
||||
|
||||
type Config struct {
|
||||
VolumeInclude *regexp.Regexp `yaml:"volume_include"`
|
||||
VolumeExclude *regexp.Regexp `yaml:"volume_exclude"`
|
||||
}
|
||||
|
||||
var ConfigDefaults = Config{
|
||||
VolumeInclude: types.RegExpAny,
|
||||
VolumeExclude: types.RegExpEmpty,
|
||||
}
|
||||
|
||||
// A Collector is a Prometheus Collector for perflib logicalDisk metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
avgReadQueue *prometheus.Desc
|
||||
avgWriteQueue *prometheus.Desc
|
||||
freeSpace *prometheus.Desc
|
||||
idleTime *prometheus.Desc
|
||||
information *prometheus.Desc
|
||||
readBytesTotal *prometheus.Desc
|
||||
readLatency *prometheus.Desc
|
||||
readOnly *prometheus.Desc
|
||||
readsTotal *prometheus.Desc
|
||||
readTime *prometheus.Desc
|
||||
readWriteLatency *prometheus.Desc
|
||||
requestsQueued *prometheus.Desc
|
||||
splitIOs *prometheus.Desc
|
||||
totalSpace *prometheus.Desc
|
||||
writeBytesTotal *prometheus.Desc
|
||||
writeLatency *prometheus.Desc
|
||||
writesTotal *prometheus.Desc
|
||||
writeTime *prometheus.Desc
|
||||
}
|
||||
|
||||
type volumeInfo struct {
|
||||
filesystem string
|
||||
serialNumber string
|
||||
label string
|
||||
volumeType string
|
||||
readonly float64
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
if config.VolumeExclude == nil {
|
||||
config.VolumeExclude = ConfigDefaults.VolumeExclude
|
||||
}
|
||||
|
||||
if config.VolumeInclude == nil {
|
||||
config.VolumeInclude = ConfigDefaults.VolumeInclude
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(app *kingpin.Application) *Collector {
|
||||
c := &Collector{
|
||||
config: ConfigDefaults,
|
||||
}
|
||||
|
||||
var volumeExclude, volumeInclude string
|
||||
|
||||
app.Flag(
|
||||
"collector.logical_disk.volume-exclude",
|
||||
"Regexp of volumes to exclude. Volume name must both match include and not match exclude to be included.",
|
||||
).Default(c.config.VolumeExclude.String()).StringVar(&volumeExclude)
|
||||
|
||||
app.Flag(
|
||||
"collector.logical_disk.volume-include",
|
||||
"Regexp of volumes to include. Volume name must both match include and not match exclude to be included.",
|
||||
).Default(c.config.VolumeInclude.String()).StringVar(&volumeInclude)
|
||||
|
||||
app.Action(func(*kingpin.ParseContext) error {
|
||||
var err error
|
||||
|
||||
c.config.VolumeExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", volumeExclude))
|
||||
if err != nil {
|
||||
return fmt.Errorf("collector.logical_disk.volume-exclude: %w", err)
|
||||
}
|
||||
|
||||
c.config.VolumeInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", volumeInclude))
|
||||
if err != nil {
|
||||
return fmt.Errorf("collector.logical_disk.volume-include: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
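// Illustrative usage (not part of the original change): limiting collection to the C: and
// D: volumes with the flag registered above. The value is wrapped in ^(?:...)$ before
// compiling, so no anchors are needed in the flag itself.
//
//	windows_exporter.exe --collector.logical_disk.volume-include="C:|D:"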
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{"LogicalDisk"}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
c.information = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "info"),
|
||||
"A metric with a constant '1' value labeled with logical disk information",
|
||||
[]string{"disk", "type", "volume", "volume_name", "filesystem", "serial_number"},
|
||||
nil,
|
||||
)
|
||||
c.readOnly = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "readonly"),
|
||||
"Whether the logical disk is read-only",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
c.requestsQueued = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
|
||||
"The number of requests queued to the disk (LogicalDisk.CurrentDiskQueueLength)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.avgReadQueue = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "avg_read_requests_queued"),
|
||||
"Average number of read requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskReadQueueLength)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.avgWriteQueue = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "avg_write_requests_queued"),
|
||||
"Average number of write requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskWriteQueueLength)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.readBytesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "read_bytes_total"),
|
||||
"The number of bytes transferred from the disk during read operations (LogicalDisk.DiskReadBytesPerSec)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.readsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "reads_total"),
|
||||
"The number of read operations on the disk (LogicalDisk.DiskReadsPerSec)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.writeBytesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "write_bytes_total"),
|
||||
"The number of bytes transferred to the disk during write operations (LogicalDisk.DiskWriteBytesPerSec)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.writesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "writes_total"),
|
||||
"The number of write operations on the disk (LogicalDisk.DiskWritesPerSec)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.readTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "read_seconds_total"),
|
||||
"Seconds that the disk was busy servicing read requests (LogicalDisk.PercentDiskReadTime)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.writeTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "write_seconds_total"),
|
||||
"Seconds that the disk was busy servicing write requests (LogicalDisk.PercentDiskWriteTime)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.freeSpace = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "free_bytes"),
|
||||
"Free space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.totalSpace = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "size_bytes"),
|
||||
"Total space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace_Base)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.idleTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "idle_seconds_total"),
|
||||
"Seconds that the disk was idle (LogicalDisk.PercentIdleTime)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.splitIOs = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "split_ios_total"),
|
||||
"The number of I/Os to the disk were split into multiple I/Os (LogicalDisk.SplitIOPerSec)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.readLatency = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "read_latency_seconds_total"),
|
||||
"Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.writeLatency = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "write_latency_seconds_total"),
|
||||
"Shows the average time, in seconds, of a write operation to the disk (LogicalDisk.AvgDiskSecPerWrite)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.readWriteLatency = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "read_write_latency_seconds_total"),
|
||||
"Shows the time, in seconds, of the average disk transfer (LogicalDisk.AvgDiskSecPerTransfer)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ctx, logger, ch); err != nil {
|
||||
logger.Error("failed collecting logical_disk metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfDisk_LogicalDisk docs:
|
||||
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
|
||||
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference.
|
||||
type logicalDisk struct {
|
||||
Name string
|
||||
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
|
||||
AvgDiskReadQueueLength float64 `perflib:"Avg. Disk Read Queue Length"`
|
||||
AvgDiskWriteQueueLength float64 `perflib:"Avg. Disk Write Queue Length"`
|
||||
DiskReadBytesPerSec float64 `perflib:"Disk Read Bytes/sec"`
|
||||
DiskReadsPerSec float64 `perflib:"Disk Reads/sec"`
|
||||
DiskWriteBytesPerSec float64 `perflib:"Disk Write Bytes/sec"`
|
||||
DiskWritesPerSec float64 `perflib:"Disk Writes/sec"`
|
||||
PercentDiskReadTime float64 `perflib:"% Disk Read Time"`
|
||||
PercentDiskWriteTime float64 `perflib:"% Disk Write Time"`
|
||||
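// Note (added for clarity, not part of the original change): "% Free Space" is a raw
// fraction counter whose base holds the volume's total size in megabytes, so the two
// fields below are deliberately cross-mapped: PercentFreeSpace carries the total
// ("% Free Space_Base") and PercentFreeSpace_Base carries the free megabytes.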
PercentFreeSpace float64 `perflib:"% Free Space_Base"`
|
||||
PercentFreeSpace_Base float64 `perflib:"Free Megabytes"`
|
||||
PercentIdleTime float64 `perflib:"% Idle Time"`
|
||||
SplitIOPerSec float64 `perflib:"Split IO/Sec"`
|
||||
AvgDiskSecPerRead float64 `perflib:"Avg. Disk sec/Read"`
|
||||
AvgDiskSecPerWrite float64 `perflib:"Avg. Disk sec/Write"`
|
||||
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var (
|
||||
err error
|
||||
diskID string
|
||||
info volumeInfo
|
||||
dst []logicalDisk
|
||||
)
|
||||
|
||||
if err = perflib.UnmarshalObject(ctx.PerfObjects["LogicalDisk"], &dst, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, volume := range dst {
|
||||
if volume.Name == "_Total" ||
|
||||
c.config.VolumeExclude.MatchString(volume.Name) ||
|
||||
!c.config.VolumeInclude.MatchString(volume.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
diskID, err = getDiskIDByVolume(volume.Name)
|
||||
if err != nil {
|
||||
logger.Warn("failed to get disk ID for "+volume.Name,
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
|
||||
info, err = getVolumeInfo(volume.Name)
|
||||
if err != nil {
|
||||
logger.Warn("failed to get volume information for %s"+volume.Name,
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.information,
|
||||
prometheus.GaugeValue,
|
||||
1,
|
||||
diskID,
|
||||
info.volumeType,
|
||||
volume.Name,
|
||||
info.label,
|
||||
info.filesystem,
|
||||
info.serialNumber,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.requestsQueued,
|
||||
prometheus.GaugeValue,
|
||||
volume.CurrentDiskQueueLength,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.avgReadQueue,
|
||||
prometheus.GaugeValue,
|
||||
volume.AvgDiskReadQueueLength*perflib.TicksToSecondScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.avgWriteQueue,
|
||||
prometheus.GaugeValue,
|
||||
volume.AvgDiskWriteQueueLength*perflib.TicksToSecondScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.readBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DiskReadBytesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.readsTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DiskReadsPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.writeBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DiskWriteBytesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.writesTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DiskWritesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.readTime,
|
||||
prometheus.CounterValue,
|
||||
volume.PercentDiskReadTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.writeTime,
|
||||
prometheus.CounterValue,
|
||||
volume.PercentDiskWriteTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.freeSpace,
|
||||
prometheus.GaugeValue,
|
||||
volume.PercentFreeSpace_Base*1024*1024,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalSpace,
|
||||
prometheus.GaugeValue,
|
||||
volume.PercentFreeSpace*1024*1024,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.idleTime,
|
||||
prometheus.CounterValue,
|
||||
volume.PercentIdleTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.splitIOs,
|
||||
prometheus.CounterValue,
|
||||
volume.SplitIOPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.readLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerRead*perflib.TicksToSecondScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.writeLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerWrite*perflib.TicksToSecondScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.readWriteLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerTransfer*perflib.TicksToSecondScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDriveType(driveType uint32) string {
|
||||
switch driveType {
|
||||
case windows.DRIVE_UNKNOWN:
|
||||
return "unknown"
|
||||
case windows.DRIVE_NO_ROOT_DIR:
|
||||
return "norootdir"
|
||||
case windows.DRIVE_REMOVABLE:
|
||||
return "removable"
|
||||
case windows.DRIVE_FIXED:
|
||||
return "fixed"
|
||||
case windows.DRIVE_REMOTE:
|
||||
return "remote"
|
||||
case windows.DRIVE_CDROM:
|
||||
return "cdrom"
|
||||
case windows.DRIVE_RAMDISK:
|
||||
return "ramdisk"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// diskExtentSize Size of the DiskExtent structure in bytes.
|
||||
const diskExtentSize = 24
|
||||
|
||||
// getDiskIDByVolume returns the disk ID for a given volume.
|
||||
func getDiskIDByVolume(rootDrive string) (string, error) {
|
||||
// Open a volume handle to the Disk Root.
|
||||
var err error
|
||||
|
||||
var f windows.Handle
|
||||
|
||||
// mode has to include FILE_SHARE permission to allow concurrent access to the disk.
|
||||
// use 0 as access mode to avoid admin permission.
|
||||
mode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE | windows.FILE_SHARE_DELETE)
|
||||
|
||||
f, err = windows.CreateFile(
|
||||
windows.StringToUTF16Ptr(`\\.\`+rootDrive),
|
||||
0, mode, nil, windows.OPEN_EXISTING, uint32(windows.FILE_ATTRIBUTE_READONLY), 0)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
defer windows.Close(f)
|
||||
|
||||
controlCode := uint32(5636096) // IOCTL_VOLUME_GET_VOLUME_DISK_EXTENTS
|
||||
volumeDiskExtents := make([]byte, 16*1024)
|
||||
|
||||
var bytesReturned uint32
|
||||
|
||||
err = windows.DeviceIoControl(f, controlCode, nil, 0, &volumeDiskExtents[0], uint32(len(volumeDiskExtents)), &bytesReturned, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not identify physical drive for %s: %w", rootDrive, err)
|
||||
}
|
||||
|
||||
numDiskIDs := uint(binary.LittleEndian.Uint32(volumeDiskExtents))
|
||||
if numDiskIDs < 1 {
|
||||
return "", fmt.Errorf("could not identify physical drive for %s: no disk IDs returned", rootDrive)
|
||||
}
|
||||
|
||||
diskIDs := make([]string, numDiskIDs)
|
||||
|
||||
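// Each DISK_EXTENT record is diskExtentSize (24) bytes: DiskNumber (4 bytes,
// padded to 8), StartingOffset (8 bytes) and ExtentLength (8 bytes). The extent
// array starts at offset 8, after NumberOfDiskExtents and its alignment padding,
// so the disk number of extent i is read at offset 8+i*diskExtentSize.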
for i := range numDiskIDs {
|
||||
diskIDs[i] = strconv.FormatUint(uint64(binary.LittleEndian.Uint32(volumeDiskExtents[8+i*diskExtentSize:])), 10)
|
||||
}
|
||||
|
||||
slices.Sort(diskIDs)
|
||||
diskIDs = slices.Compact(diskIDs)
|
||||
|
||||
return strings.Join(diskIDs, ";"), nil
|
||||
}
|
||||
|
||||
func getVolumeInfo(rootDrive string) (volumeInfo, error) {
|
||||
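// Only drive-letter volumes (for example "C:") are queried for label and
// filesystem information; any other instance name yields an empty volumeInfo.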
if !strings.HasSuffix(rootDrive, ":") {
|
||||
return volumeInfo{}, nil
|
||||
}
|
||||
|
||||
volPath := windows.StringToUTF16Ptr(rootDrive + `\`)
|
||||
|
||||
volBufLabel := make([]uint16, windows.MAX_PATH+1)
|
||||
volSerialNum := uint32(0)
|
||||
fsFlags := uint32(0)
|
||||
volBufType := make([]uint16, windows.MAX_PATH+1)
|
||||
|
||||
driveType := windows.GetDriveType(volPath)
|
||||
|
||||
err := windows.GetVolumeInformation(volPath, &volBufLabel[0], uint32(len(volBufLabel)),
|
||||
&volSerialNum, nil, &fsFlags, &volBufType[0], uint32(len(volBufType)))
|
||||
if err != nil {
|
||||
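// CD-ROM and removable drives legitimately fail this call when no media is
// inserted, so treat that case as "no volume information" rather than an error.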
if driveType != windows.DRIVE_CDROM && driveType != windows.DRIVE_REMOVABLE {
|
||||
return volumeInfo{}, err
|
||||
}
|
||||
|
||||
return volumeInfo{}, nil
|
||||
}
|
||||
|
||||
return volumeInfo{
|
||||
volumeType: getDriveType(driveType),
|
||||
label: windows.UTF16PtrToString(&volBufLabel[0]),
|
||||
filesystem: windows.UTF16PtrToString(&volBufType[0]),
|
||||
serialNumber: fmt.Sprintf("%X", volSerialNum),
|
||||
readonly: float64(fsFlags & windows.FILE_READ_ONLY_VOLUME),
|
||||
}, nil
|
||||
}
|
||||
@@ -1,16 +0,0 @@
package logical_disk_test

import (
"testing"

"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
// The volume include filter is not set in the testing context (kingpin flags are not parsed), which would otherwise cause the Collector to skip all disks.
localVolumeInclude := ".+"
kingpin.CommandLine.GetArg("collector.logical_disk.volume-include").StringVar(&localVolumeInclude)
testutils.FuncBenchmarkCollector(b, "logical_disk", logical_disk.NewWithFlags)
}
@@ -1,242 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package logon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "logon"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for WMI metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
wmiClient *wmi.Client
|
||||
|
||||
logonType *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
|
||||
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
|
||||
return errors.New("wmiClient or SWbemServicesClient is nil")
|
||||
}
|
||||
|
||||
c.wmiClient = wmiClient
|
||||
c.logonType = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "logon_type"),
|
||||
"Number of active logon sessions (LogonSession.LogonType)",
|
||||
[]string{"status"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ch); err != nil {
|
||||
logger.Error("failed collecting user metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_LogonSession docs:
|
||||
// - https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-logonsession
|
||||
type Win32_LogonSession struct {
|
||||
LogonType uint32
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_LogonSession
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_LogonSession", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(dst) == 0 {
|
||||
return errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
// Init counters
|
||||
system := 0
|
||||
interactive := 0
|
||||
network := 0
|
||||
batch := 0
|
||||
service := 0
|
||||
proxy := 0
|
||||
unlock := 0
|
||||
networkcleartext := 0
|
||||
newcredentials := 0
|
||||
remoteinteractive := 0
|
||||
cachedinteractive := 0
|
||||
cachedremoteinteractive := 0
|
||||
cachedunlock := 0
|
||||
|
||||
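// Bucket the sessions by LogonType; the numeric values follow the
// Win32_LogonSession documentation linked above.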
for _, entry := range dst {
|
||||
switch entry.LogonType {
|
||||
case 0:
|
||||
system++
|
||||
case 2:
|
||||
interactive++
|
||||
case 3:
|
||||
network++
|
||||
case 4:
|
||||
batch++
|
||||
case 5:
|
||||
service++
|
||||
case 6:
|
||||
proxy++
|
||||
case 7:
|
||||
unlock++
|
||||
case 8:
|
||||
networkcleartext++
|
||||
case 9:
|
||||
newcredentials++
|
||||
case 10:
|
||||
remoteinteractive++
|
||||
case 11:
|
||||
cachedinteractive++
|
||||
case 12:
|
||||
cachedremoteinteractive++
|
||||
case 13:
|
||||
cachedunlock++
|
||||
}
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(system),
|
||||
"system",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(interactive),
|
||||
"interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(network),
|
||||
"network",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(batch),
|
||||
"batch",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(service),
|
||||
"service",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(proxy),
|
||||
"proxy",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(unlock),
|
||||
"unlock",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(networkcleartext),
|
||||
"network_clear_text",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(newcredentials),
|
||||
"new_credentials",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(remoteinteractive),
|
||||
"remote_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedinteractive),
|
||||
"cached_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
c.logonType,
prometheus.GaugeValue,
float64(cachedremoteinteractive),
"cached_remote_interactive",
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.logonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedunlock),
|
||||
"cached_unlock",
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,13 +0,0 @@
package logon_test

import (
"testing"

"github.com/prometheus-community/windows_exporter/pkg/collector/logon"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
// No context name required as Collector source is WMI
testutils.FuncBenchmarkCollector(b, logon.Name, logon.NewWithFlags)
}
@@ -5,54 +5,54 @@ import (
|
||||
"slices"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/ad"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/adcs"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/adfs"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cache"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/container"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cpu"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cpu_info"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/cs"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/dfsr"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/dhcp"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/diskdrive"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/dns"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/exchange"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/filetime"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/fsrmquota"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/hyperv"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/iis"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/license"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/logical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/logon"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/memory"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/mscluster"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/msmq"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/mssql"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/net"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/netframework"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/nps"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/os"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/physical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/printer"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/process"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/remote_fx"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/scheduled_task"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/service"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/smb"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/smbclient"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/smtp"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/system"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/tcp"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/teradici_pcoip"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/terminal_services"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/textfile"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/thermalzone"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/time"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/updates"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/vmware"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/vmware_blast"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/ad"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/adcs"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/adfs"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cache"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/container"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cpu"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cpu_info"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/cs"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/dfsr"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/dhcp"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/diskdrive"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/dns"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/exchange"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/filetime"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/fsrmquota"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/hyperv"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/iis"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/license"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/logical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/logon"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/memory"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/mscluster"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/msmq"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/mssql"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/net"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/netframework"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/nps"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/os"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/physical_disk"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/printer"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/process"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/remote_fx"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/scheduled_task"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/service"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/smb"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/smbclient"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/smtp"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/system"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/tcp"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/teradici_pcoip"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/terminal_services"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/textfile"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/thermalzone"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/time"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/updates"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/vmware"
|
||||
"github.com/prometheus-community/windows_exporter/internal/collector/vmware_blast"
|
||||
)
|
||||
|
||||
func NewBuilderWithFlags[C Collector](fn BuilderWithFlags[C]) BuilderWithFlags[Collector] {
|
||||
|
||||
@@ -1,618 +0,0 @@
|
||||
// returns data points from Win32_PerfRawData_PerfOS_Memory
|
||||
// <add link to documentation here> - Win32_PerfRawData_PerfOS_Memory class
|
||||
|
||||
//go:build windows
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/headers/sysinfoapi"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "memory"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// A Collector is a Prometheus Collector for perflib Memory metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
// Performance metrics
|
||||
availableBytes *prometheus.Desc
|
||||
cacheBytes *prometheus.Desc
|
||||
cacheBytesPeak *prometheus.Desc
|
||||
cacheFaultsTotal *prometheus.Desc
|
||||
commitLimit *prometheus.Desc
|
||||
committedBytes *prometheus.Desc
|
||||
demandZeroFaultsTotal *prometheus.Desc
|
||||
freeAndZeroPageListBytes *prometheus.Desc
|
||||
freeSystemPageTableEntries *prometheus.Desc
|
||||
modifiedPageListBytes *prometheus.Desc
|
||||
pageFaultsTotal *prometheus.Desc
|
||||
swapPageReadsTotal *prometheus.Desc
|
||||
swapPagesReadTotal *prometheus.Desc
|
||||
swapPagesWrittenTotal *prometheus.Desc
|
||||
swapPageOperationsTotal *prometheus.Desc
|
||||
swapPageWritesTotal *prometheus.Desc
|
||||
poolNonPagedAllocationsTotal *prometheus.Desc
|
||||
poolNonPagedBytes *prometheus.Desc
|
||||
poolPagedAllocationsTotal *prometheus.Desc
|
||||
poolPagedBytes *prometheus.Desc
|
||||
poolPagedResidentBytes *prometheus.Desc
|
||||
standbyCacheCoreBytes *prometheus.Desc
|
||||
standbyCacheNormalPriorityBytes *prometheus.Desc
|
||||
standbyCacheReserveBytes *prometheus.Desc
|
||||
systemCacheResidentBytes *prometheus.Desc
|
||||
systemCodeResidentBytes *prometheus.Desc
|
||||
systemCodeTotalBytes *prometheus.Desc
|
||||
systemDriverResidentBytes *prometheus.Desc
|
||||
systemDriverTotalBytes *prometheus.Desc
|
||||
transitionFaultsTotal *prometheus.Desc
|
||||
transitionPagesRepurposedTotal *prometheus.Desc
|
||||
writeCopiesTotal *prometheus.Desc
|
||||
|
||||
// Global memory status
|
||||
processMemoryLimitBytes *prometheus.Desc
|
||||
physicalMemoryTotalBytes *prometheus.Desc
|
||||
physicalMemoryFreeBytes *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{"Memory"}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
c.availableBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "available_bytes"),
|
||||
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
|
||||
" the standby (cached), free and zero page lists (AvailableBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.cacheBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "cache_bytes"),
|
||||
"(CacheBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.cacheBytesPeak = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "cache_bytes_peak"),
|
||||
"(CacheBytesPeak)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.cacheFaultsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "cache_faults_total"),
|
||||
"Number of faults which occur when a page sought in the file system cache is not found there and must be retrieved from elsewhere in memory (soft fault) "+
|
||||
"or from disk (hard fault) (Cache Faults/sec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.commitLimit = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "commit_limit"),
|
||||
"(CommitLimit)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.committedBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "committed_bytes"),
|
||||
"(CommittedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.demandZeroFaultsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "demand_zero_faults_total"),
|
||||
"The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security"+
|
||||
" feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space (Demand Zero Faults/sec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.freeAndZeroPageListBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "free_and_zero_page_list_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the free and zero page lists. This memory does not contain cached data. It is immediately"+
|
||||
" available for allocation to a process or for system use (FreeAndZeroPageListBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.freeSystemPageTableEntries = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "free_system_page_table_entries"),
|
||||
"(FreeSystemPageTableEntries)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.modifiedPageListBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "modified_page_list_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the modified page list. This memory contains cached data and code that is not actively in "+
|
||||
"use by processes, the system and the system cache (ModifiedPageListBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.pageFaultsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "page_faults_total"),
|
||||
"Overall rate at which faulted pages are handled by the processor (Page Faults/sec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.swapPageReadsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "swap_page_reads_total"),
|
||||
"Number of disk page reads (a single read operation reading several pages is still only counted once) (PageReadsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.swapPagesReadTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "swap_pages_read_total"),
|
||||
"Number of pages read across all page reads (ie counting all pages read even if they are read in a single operation) (PagesInputPersec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.swapPagesWrittenTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "swap_pages_written_total"),
|
||||
"Number of pages written across all page writes (ie counting all pages written even if they are written in a single operation) (PagesOutputPersec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.swapPageOperationsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "swap_page_operations_total"),
|
||||
"Total number of swap page read and writes (PagesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.swapPageWritesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "swap_page_writes_total"),
|
||||
"Number of disk page writes (a single write operation writing several pages is still only counted once) (PageWritesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.poolNonPagedAllocationsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "pool_nonpaged_allocs_total"),
|
||||
"The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory area for objects that cannot be written"+
|
||||
" to disk, and must remain in physical memory as long as they are allocated (PoolNonpagedAllocs)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.poolNonPagedBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "pool_nonpaged_bytes"),
|
||||
"Number of bytes in the non-paged pool, an area of the system virtual memory that is used for objects that cannot be written to disk, but must "+
|
||||
"remain in physical memory as long as they are allocated (PoolNonpagedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.poolPagedAllocationsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "pool_paged_allocs_total"),
|
||||
"Number of calls to allocate space in the paged pool, regardless of the amount of space allocated in each call (PoolPagedAllocs)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.poolPagedBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "pool_paged_bytes"),
|
||||
"(PoolPagedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.poolPagedResidentBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "pool_paged_resident_bytes"),
|
||||
"The size, in bytes, of the portion of the paged pool that is currently resident and active in physical memory. The paged pool is an area of the "+
|
||||
"system virtual memory that is used for objects that can be written to disk when they are not being used (PoolPagedResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.standbyCacheCoreBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "standby_cache_core_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the core standby cache page lists. This memory contains cached data and code that is "+
|
||||
"not actively in use by processes, the system and the system cache (StandbyCacheCoreBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.standbyCacheNormalPriorityBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "standby_cache_normal_priority_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the normal priority standby cache page lists. This memory contains cached data and "+
|
||||
"code that is not actively in use by processes, the system and the system cache (StandbyCacheNormalPriorityBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.standbyCacheReserveBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "standby_cache_reserve_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the reserve standby cache page lists. This memory contains cached data and code "+
|
||||
"that is not actively in use by processes, the system and the system cache (StandbyCacheReserveBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.systemCacheResidentBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "system_cache_resident_bytes"),
|
||||
"The size, in bytes, of the portion of the system file cache which is currently resident and active in physical memory (SystemCacheResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.systemCodeResidentBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "system_code_resident_bytes"),
|
||||
"The size, in bytes, of the pageable operating system code that is currently resident and active in physical memory (SystemCodeResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.systemCodeTotalBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "system_code_total_bytes"),
|
||||
"The size, in bytes, of the pageable operating system code currently mapped into the system virtual address space (SystemCodeTotalBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.systemDriverResidentBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "system_driver_resident_bytes"),
|
||||
"The size, in bytes, of the pageable physical memory being used by device drivers. It is the working set (physical memory area) of the drivers (SystemDriverResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.systemDriverTotalBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "system_driver_total_bytes"),
|
||||
"The size, in bytes, of the pageable virtual memory currently being used by device drivers. Pageable memory can be written to disk when it is not being used (SystemDriverTotalBytes)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.transitionFaultsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "transition_faults_total"),
|
||||
"Number of faults rate at which page faults are resolved by recovering pages that were being used by another process sharing the page, or were on the "+
|
||||
"modified page list or the standby list, or were being written to disk at the time of the page fault (TransitionFaultsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.transitionPagesRepurposedTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "transition_pages_repurposed_total"),
|
||||
"Transition Pages RePurposed is the rate at which the number of transition cache pages were reused for a different purpose (TransitionPagesRePurposedPersec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.writeCopiesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "write_copies_total"),
|
||||
"The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory (WriteCopiesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.processMemoryLimitBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "process_memory_limit_bytes"),
|
||||
"The size of the user-mode portion of the virtual address space of the calling process, in bytes. This value depends on the type of process, the type of processor, and the configuration of the operating system.",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.physicalMemoryTotalBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "physical_total_bytes"),
|
||||
"The amount of actual physical memory, in bytes.",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.physicalMemoryFreeBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "physical_free_bytes"),
|
||||
"The amount of physical memory currently available, in bytes. This is the amount of physical memory that can be immediately reused without having to write its contents to disk first. It is the sum of the size of the standby, free, and zero lists.",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
errs := make([]error, 0, 2)
|
||||
|
||||
if err := c.collectPerformanceData(ctx, logger, ch); err != nil {
|
||||
logger.Error("failed collecting memory metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
if err := c.collectGlobalMemoryStatus(ch); err != nil {
|
||||
logger.Error("failed collecting memory metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
func (c *Collector) collectGlobalMemoryStatus(ch chan<- prometheus.Metric) error {
|
||||
memoryStatusEx, err := sysinfoapi.GlobalMemoryStatusEx()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get memory status: %w", err)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processMemoryLimitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(memoryStatusEx.TotalVirtual),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.physicalMemoryTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(memoryStatusEx.TotalPhys),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.physicalMemoryFreeBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(memoryStatusEx.AvailPhys),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type memory struct {
|
||||
AvailableBytes float64 `perflib:"Available Bytes"`
|
||||
AvailableKBytes float64 `perflib:"Available KBytes"`
|
||||
AvailableMBytes float64 `perflib:"Available MBytes"`
|
||||
CacheBytes float64 `perflib:"Cache Bytes"`
|
||||
CacheBytesPeak float64 `perflib:"Cache Bytes Peak"`
|
||||
CacheFaultsPersec float64 `perflib:"Cache Faults/sec"`
|
||||
CommitLimit float64 `perflib:"Commit Limit"`
|
||||
CommittedBytes float64 `perflib:"Committed Bytes"`
|
||||
DemandZeroFaultsPersec float64 `perflib:"Demand Zero Faults/sec"`
|
||||
FreeAndZeroPageListBytes float64 `perflib:"Free & Zero Page List Bytes"`
|
||||
FreeSystemPageTableEntries float64 `perflib:"Free System Page Table Entries"`
|
||||
ModifiedPageListBytes float64 `perflib:"Modified Page List Bytes"`
|
||||
PageFaultsPersec float64 `perflib:"Page Faults/sec"`
|
||||
PageReadsPersec float64 `perflib:"Page Reads/sec"`
|
||||
PagesInputPersec float64 `perflib:"Pages Input/sec"`
|
||||
PagesOutputPersec float64 `perflib:"Pages Output/sec"`
|
||||
PagesPersec float64 `perflib:"Pages/sec"`
|
||||
PageWritesPersec float64 `perflib:"Page Writes/sec"`
|
||||
PoolNonpagedAllocs float64 `perflib:"Pool Nonpaged Allocs"`
|
||||
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
|
||||
PoolPagedAllocs float64 `perflib:"Pool Paged Allocs"`
|
||||
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
|
||||
PoolPagedResidentBytes float64 `perflib:"Pool Paged Resident Bytes"`
|
||||
StandbyCacheCoreBytes float64 `perflib:"Standby Cache Core Bytes"`
|
||||
StandbyCacheNormalPriorityBytes float64 `perflib:"Standby Cache Normal Priority Bytes"`
|
||||
StandbyCacheReserveBytes float64 `perflib:"Standby Cache Reserve Bytes"`
|
||||
SystemCacheResidentBytes float64 `perflib:"System Cache Resident Bytes"`
|
||||
SystemCodeResidentBytes float64 `perflib:"System Code Resident Bytes"`
|
||||
SystemCodeTotalBytes float64 `perflib:"System Code Total Bytes"`
|
||||
SystemDriverResidentBytes float64 `perflib:"System Driver Resident Bytes"`
|
||||
SystemDriverTotalBytes float64 `perflib:"System Driver Total Bytes"`
|
||||
TransitionFaultsPersec float64 `perflib:"Transition Faults/sec"`
|
||||
TransitionPagesRePurposedPersec float64 `perflib:"Transition Pages RePurposed/sec"`
|
||||
WriteCopiesPersec float64 `perflib:"Write Copies/sec"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectPerformanceData(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
var dst []memory
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["Memory"], &dst, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
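// "Memory" is a single-instance perflib object, so all counters are read from dst[0].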
ch <- prometheus.MustNewConstMetric(
|
||||
c.availableBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].AvailableBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cacheBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CacheBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cacheBytesPeak,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CacheBytesPeak,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cacheFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].CacheFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.commitLimit,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CommitLimit,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.committedBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CommittedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.demandZeroFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DemandZeroFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.freeAndZeroPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].FreeAndZeroPageListBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.freeSystemPageTableEntries,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].FreeSystemPageTableEntries,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.modifiedPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].ModifiedPageListBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.pageFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PageFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.swapPageReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PageReadsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.swapPagesReadTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PagesInputPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.swapPagesWrittenTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PagesOutputPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.swapPageOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PagesPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.swapPageWritesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PageWritesPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.poolNonPagedAllocationsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolNonpagedAllocs,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.poolNonPagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolNonpagedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.poolPagedAllocationsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PoolPagedAllocs,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.poolPagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolPagedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.poolPagedResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolPagedResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.standbyCacheCoreBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].StandbyCacheCoreBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.standbyCacheNormalPriorityBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].StandbyCacheNormalPriorityBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.standbyCacheReserveBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].StandbyCacheReserveBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.systemCacheResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemCacheResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.systemCodeResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemCodeResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.systemCodeTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemCodeTotalBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.systemDriverResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemDriverResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.systemDriverTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemDriverTotalBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.transitionFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].TransitionFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.transitionPagesRepurposedTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].TransitionPagesRePurposedPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.writeCopiesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].WriteCopiesPersec,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
package memory_test

import (
"testing"

"github.com/prometheus-community/windows_exporter/pkg/collector/memory"
"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, memory.Name, memory.NewWithFlags)
}
@@ -1,302 +0,0 @@
|
||||
package mscluster
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "mscluster"
|
||||
|
||||
type Config struct {
|
||||
CollectorsEnabled []string `yaml:"collectors_enabled"`
|
||||
}
|
||||
|
||||
var ConfigDefaults = Config{
|
||||
CollectorsEnabled: []string{
|
||||
"cluster",
|
||||
"network",
|
||||
"node",
|
||||
"resource",
|
||||
"resourcegroup",
|
||||
},
|
||||
}
|
||||
|
||||
// A Collector is a Prometheus Collector for WMI MSCluster_Cluster metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
wmiClient *wmi.Client
|
||||
|
||||
// cluster
|
||||
clusterAddEvictDelay *prometheus.Desc
|
||||
clusterAdminAccessPoint *prometheus.Desc
|
||||
clusterAutoAssignNodeSite *prometheus.Desc
|
||||
clusterAutoBalancerLevel *prometheus.Desc
|
||||
clusterAutoBalancerMode *prometheus.Desc
|
||||
clusterBackupInProgress *prometheus.Desc
|
||||
clusterBlockCacheSize *prometheus.Desc
|
||||
clusterClusSvcHangTimeout *prometheus.Desc
|
||||
clusterClusSvcRegroupOpeningTimeout *prometheus.Desc
|
||||
clusterClusSvcRegroupPruningTimeout *prometheus.Desc
|
||||
clusterClusSvcRegroupStageTimeout *prometheus.Desc
|
||||
clusterClusSvcRegroupTickInMilliseconds *prometheus.Desc
|
||||
clusterClusterEnforcedAntiAffinity *prometheus.Desc
|
||||
clusterClusterFunctionalLevel *prometheus.Desc
|
||||
clusterClusterGroupWaitDelay *prometheus.Desc
|
||||
clusterClusterLogLevel *prometheus.Desc
|
||||
clusterClusterLogSize *prometheus.Desc
|
||||
clusterClusterUpgradeVersion *prometheus.Desc
|
||||
clusterCrossSiteDelay *prometheus.Desc
|
||||
clusterCrossSiteThreshold *prometheus.Desc
|
||||
clusterCrossSubnetDelay *prometheus.Desc
|
||||
clusterCrossSubnetThreshold *prometheus.Desc
|
||||
clusterCsvBalancer *prometheus.Desc
|
||||
clusterDatabaseReadWriteMode *prometheus.Desc
|
||||
clusterDefaultNetworkRole *prometheus.Desc
|
||||
clusterDetectedCloudPlatform *prometheus.Desc
|
||||
clusterDetectManagedEvents *prometheus.Desc
|
||||
clusterDetectManagedEventsThreshold *prometheus.Desc
|
||||
clusterDisableGroupPreferredOwnerRandomization *prometheus.Desc
|
||||
clusterDrainOnShutdown *prometheus.Desc
|
||||
clusterDynamicQuorumEnabled *prometheus.Desc
|
||||
clusterEnableSharedVolumes *prometheus.Desc
|
||||
clusterFixQuorum *prometheus.Desc
|
||||
clusterGracePeriodEnabled *prometheus.Desc
|
||||
clusterGracePeriodTimeout *prometheus.Desc
|
||||
clusterGroupDependencyTimeout *prometheus.Desc
|
||||
clusterHangRecoveryAction *prometheus.Desc
|
||||
clusterIgnorePersistentStateOnStartup *prometheus.Desc
|
||||
clusterLogResourceControls *prometheus.Desc
|
||||
clusterLowerQuorumPriorityNodeId *prometheus.Desc
|
||||
clusterMaxNumberOfNodes *prometheus.Desc
|
||||
clusterMessageBufferLength *prometheus.Desc
|
||||
clusterMinimumNeverPreemptPriority *prometheus.Desc
|
||||
clusterMinimumPreemptorPriority *prometheus.Desc
|
||||
clusterNetftIPSecEnabled *prometheus.Desc
|
||||
clusterPlacementOptions *prometheus.Desc
|
||||
clusterPlumbAllCrossSubnetRoutes *prometheus.Desc
|
||||
clusterPreventQuorum *prometheus.Desc
|
||||
clusterQuarantineDuration *prometheus.Desc
|
||||
clusterQuarantineThreshold *prometheus.Desc
|
||||
clusterQuorumArbitrationTimeMax *prometheus.Desc
|
||||
clusterQuorumArbitrationTimeMin *prometheus.Desc
|
||||
clusterQuorumLogFileSize *prometheus.Desc
|
||||
clusterQuorumTypeValue *prometheus.Desc
|
||||
clusterRequestReplyTimeout *prometheus.Desc
|
||||
clusterResiliencyDefaultPeriod *prometheus.Desc
|
||||
clusterResiliencyLevel *prometheus.Desc
|
||||
clusterResourceDllDeadlockPeriod *prometheus.Desc
|
||||
clusterRootMemoryReserved *prometheus.Desc
|
||||
clusterRouteHistoryLength *prometheus.Desc
|
||||
clusterS2DBusTypes *prometheus.Desc
|
||||
clusterS2DCacheDesiredState *prometheus.Desc
|
||||
clusterS2DCacheFlashReservePercent *prometheus.Desc
|
||||
clusterS2DCachePageSizeKBytes *prometheus.Desc
|
||||
clusterS2DEnabled *prometheus.Desc
|
||||
clusterS2DIOLatencyThreshold *prometheus.Desc
|
||||
clusterS2DOptimizations *prometheus.Desc
|
||||
clusterSameSubnetDelay *prometheus.Desc
|
||||
clusterSameSubnetThreshold *prometheus.Desc
|
||||
clusterSecurityLevel *prometheus.Desc
|
||||
clusterSecurityLevelForStorage *prometheus.Desc
|
||||
clusterSharedVolumeVssWriterOperationTimeout *prometheus.Desc
|
||||
clusterShutdownTimeoutInMinutes *prometheus.Desc
|
||||
clusterUseClientAccessNetworksForSharedVolumes *prometheus.Desc
|
||||
clusterWitnessDatabaseWriteTimeout *prometheus.Desc
|
||||
clusterWitnessDynamicWeight *prometheus.Desc
|
||||
clusterWitnessRestartInterval *prometheus.Desc
|
||||
|
||||
// network
|
||||
networkCharacteristics *prometheus.Desc
|
||||
networkFlags *prometheus.Desc
|
||||
networkMetric *prometheus.Desc
|
||||
networkRole *prometheus.Desc
|
||||
networkState *prometheus.Desc
|
||||
|
||||
// node
|
||||
nodeBuildNumber *prometheus.Desc
|
||||
nodeCharacteristics *prometheus.Desc
|
||||
nodeDetectedCloudPlatform *prometheus.Desc
|
||||
nodeDynamicWeight *prometheus.Desc
|
||||
nodeFlags *prometheus.Desc
|
||||
nodeMajorVersion *prometheus.Desc
|
||||
nodeMinorVersion *prometheus.Desc
|
||||
nodeNeedsPreventQuorum *prometheus.Desc
|
||||
nodeNodeDrainStatus *prometheus.Desc
|
||||
nodeNodeHighestVersion *prometheus.Desc
|
||||
nodeNodeLowestVersion *prometheus.Desc
|
||||
nodeNodeWeight *prometheus.Desc
|
||||
nodeState *prometheus.Desc
|
||||
nodeStatusInformation *prometheus.Desc
|
||||
|
||||
resourceCharacteristics *prometheus.Desc
|
||||
resourceDeadlockTimeout *prometheus.Desc
|
||||
resourceEmbeddedFailureAction *prometheus.Desc
|
||||
resourceFlags *prometheus.Desc
|
||||
resourceIsAlivePollInterval *prometheus.Desc
|
||||
resourceLooksAlivePollInterval *prometheus.Desc
|
||||
resourceMonitorProcessId *prometheus.Desc
|
||||
resourceOwnerNode *prometheus.Desc
|
||||
resourcePendingTimeout *prometheus.Desc
|
||||
resourceResourceClass *prometheus.Desc
|
||||
resourceRestartAction *prometheus.Desc
|
||||
resourceRestartDelay *prometheus.Desc
|
||||
resourceRestartPeriod *prometheus.Desc
|
||||
resourceRestartThreshold *prometheus.Desc
|
||||
resourceRetryPeriodOnFailure *prometheus.Desc
|
||||
resourceState *prometheus.Desc
|
||||
resourceSubClass *prometheus.Desc
|
||||
|
||||
// ResourceGroup
|
||||
resourceGroupAutoFailbackType *prometheus.Desc
|
||||
resourceGroupCharacteristics *prometheus.Desc
|
||||
resourceGroupColdStartSetting *prometheus.Desc
|
||||
resourceGroupDefaultOwner *prometheus.Desc
|
||||
resourceGroupFailbackWindowEnd *prometheus.Desc
|
||||
resourceGroupFailbackWindowStart *prometheus.Desc
|
||||
resourceGroupFailOverPeriod *prometheus.Desc
|
||||
resourceGroupFailOverThreshold *prometheus.Desc
|
||||
resourceGroupFlags *prometheus.Desc
|
||||
resourceGroupGroupType *prometheus.Desc
|
||||
resourceGroupOwnerNode *prometheus.Desc
|
||||
resourceGroupPriority *prometheus.Desc
|
||||
resourceGroupResiliencyPeriod *prometheus.Desc
|
||||
resourceGroupState *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
if config.CollectorsEnabled == nil {
|
||||
config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(app *kingpin.Application) *Collector {
|
||||
c := &Collector{
|
||||
config: ConfigDefaults,
|
||||
}
|
||||
c.config.CollectorsEnabled = make([]string, 0)
|
||||
|
||||
var collectorsEnabled string
|
||||
|
||||
app.Flag(
|
||||
"collector.mscluster.enabled",
|
||||
"Comma-separated list of collectors to use.",
|
||||
).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
|
||||
|
||||
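// Splitting the comma-separated list is deferred until kingpin has parsed the
// flags, so the default and any user-supplied value are handled identically.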
app.Action(func(*kingpin.ParseContext) error {
|
||||
c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{"Memory"}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
|
||||
if len(c.config.CollectorsEnabled) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
|
||||
return errors.New("wmiClient or SWbemServicesClient is nil")
|
||||
}
|
||||
|
||||
c.wmiClient = wmiClient
|
||||
|
||||
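// Metric descriptors are only built for the enabled sub-collectors; the same
// list is consulted again in Collect to skip the disabled ones.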
if slices.Contains(c.config.CollectorsEnabled, "cluster") {
|
||||
c.buildCluster()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "network") {
|
||||
c.buildNetwork()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "node") {
|
||||
c.buildNode()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "resource") {
|
||||
c.buildResource()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "resourcegroup") {
|
||||
c.buildResourceGroup()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
if len(c.config.CollectorsEnabled) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
err error
|
||||
errs []error
|
||||
nodeNames []string
|
||||
)
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "cluster") {
|
||||
if err = c.collectCluster(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect cluster metrics: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "network") {
|
||||
if err = c.collectNetwork(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect network metrics: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "node") {
|
||||
if nodeNames, err = c.collectNode(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect node metrics: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
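// The resource and resource group collectors label metrics by owner node, so
// they reuse the node names gathered by collectNode above.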
if slices.Contains(c.config.CollectorsEnabled, "resource") {
|
||||
if err = c.collectResource(ch, nodeNames); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect resource metrics: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "resourcegroup") {
|
||||
if err = c.collectResourceGroup(ch, nodeNames); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect resource group metrics: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,102 +0,0 @@
|
||||
package mscluster
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const nameNetwork = Name + "_network"
|
||||
|
||||
// msClusterNetwork represents the MSCluster_Network WMI class
|
||||
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-network
|
||||
type msClusterNetwork struct {
|
||||
Name string
|
||||
|
||||
Characteristics uint
|
||||
Flags uint
|
||||
Metric uint
|
||||
Role uint
|
||||
State uint
|
||||
}
|
||||
|
||||
func (c *Collector) buildNetwork() {
|
||||
c.networkCharacteristics = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNetwork, "characteristics"),
|
||||
"Provides the characteristics of the network.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.networkFlags = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNetwork, "flags"),
|
||||
"Provides access to the flags set for the node. ",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.networkMetric = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNetwork, "metric"),
|
||||
"The metric of a cluster network (networks with lower values are used first). If this value is set, then the AutoMetric property is set to false.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.networkRole = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNetwork, "role"),
|
||||
"Provides access to the network's Role property. The Role property describes the role of the network in the cluster. 0: None; 1: Cluster; 2: Client; 3: Both ",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.networkState = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNetwork, "state"),
|
||||
"Provides the current state of the network. 1-1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus metric channel.
|
||||
func (c *Collector) collectNetwork(ch chan<- prometheus.Metric) error {
|
||||
var dst []msClusterNetwork
|
||||
|
||||
if err := c.wmiClient.Query("SELECT * FROM MSCluster_Network", &dst, nil, "root/MSCluster"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.networkCharacteristics,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Characteristics),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.networkFlags,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Flags),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.networkMetric,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Metric),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.networkRole,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Role),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.networkState,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.State),
|
||||
v.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,232 +0,0 @@
|
||||
package mscluster
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const nameNode = Name + "_node"
|
||||
|
||||
// msClusterNode represents the MSCluster_Node WMI class
|
||||
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-node
|
||||
type msClusterNode struct {
|
||||
Name string
|
||||
|
||||
BuildNumber uint
|
||||
Characteristics uint
|
||||
DetectedCloudPlatform uint
|
||||
DynamicWeight uint
|
||||
Flags uint
|
||||
MajorVersion uint
|
||||
MinorVersion uint
|
||||
NeedsPreventQuorum uint
|
||||
NodeDrainStatus uint
|
||||
NodeHighestVersion uint
|
||||
NodeLowestVersion uint
|
||||
NodeWeight uint
|
||||
State uint
|
||||
StatusInformation uint
|
||||
}
|
||||
|
||||
func (c *Collector) buildNode() {
|
||||
c.nodeBuildNumber = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "build_number"),
|
||||
"Provides access to the node's BuildNumber property.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeCharacteristics = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "characteristics"),
|
||||
"Provides access to the characteristics set for the node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeDetectedCloudPlatform = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "detected_cloud_platform"),
|
||||
"(DetectedCloudPlatform)",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeDynamicWeight = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "dynamic_weight"),
|
||||
"The dynamic vote weight of the node adjusted by dynamic quorum feature.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeFlags = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "flags"),
|
||||
"Provides access to the flags set for the node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeMajorVersion = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "major_version"),
|
||||
"Provides access to the node's MajorVersion property, which specifies the major portion of the Windows version installed.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeMinorVersion = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "minor_version"),
|
||||
"Provides access to the node's MinorVersion property, which specifies the minor portion of the Windows version installed.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeNeedsPreventQuorum = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "needs_prevent_quorum"),
|
||||
"Whether the cluster service on that node should be started with prevent quorum flag.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeNodeDrainStatus = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "node_drain_status"),
|
||||
"The current node drain status of a node. 0: Not Initiated; 1: In Progress; 2: Completed; 3: Failed",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeNodeHighestVersion = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "node_highest_version"),
|
||||
"Provides access to the node's NodeHighestVersion property, which specifies the highest possible version of the cluster service with which the node can join or communicate.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeNodeLowestVersion = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "node_lowest_version"),
|
||||
"Provides access to the node's NodeLowestVersion property, which specifies the lowest possible version of the cluster service with which the node can join or communicate.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeNodeWeight = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "node_weight"),
|
||||
"The vote weight of the node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeState = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "state"),
|
||||
"Returns the current state of a node. -1: Unknown; 0: Up; 1: Down; 2: Paused; 3: Joining",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.nodeStatusInformation = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameNode, "status_information"),
|
||||
"The isolation or quarantine status of the node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) collectNode(ch chan<- prometheus.Metric) ([]string, error) {
|
||||
var dst []msClusterNode
|
||||
|
||||
if err := c.wmiClient.Query("SELECT * FROM MSCluster_Node", &dst, nil, "root/MSCluster"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodeNames := make([]string, 0, len(dst))
|
||||
|
||||
for _, v := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeBuildNumber,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.BuildNumber),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeCharacteristics,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Characteristics),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeDetectedCloudPlatform,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.DetectedCloudPlatform),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeDynamicWeight,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.DynamicWeight),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeFlags,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Flags),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeMajorVersion,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.MajorVersion),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeMinorVersion,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.MinorVersion),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeNeedsPreventQuorum,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NeedsPreventQuorum),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeNodeDrainStatus,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NodeDrainStatus),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeNodeHighestVersion,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NodeHighestVersion),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeNodeLowestVersion,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NodeLowestVersion),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeNodeWeight,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NodeWeight),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeState,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.State),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nodeStatusInformation,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.StatusInformation),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
nodeNames = append(nodeNames, v.Name)
|
||||
}
|
||||
|
||||
return nodeNames, nil
|
||||
}
|
||||
@@ -1,284 +0,0 @@
|
||||
package mscluster
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const nameResource = Name + "_resource"
|
||||
|
||||
// msClusterResource represents the MSCluster_Resource WMI class
|
||||
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resource
|
||||
type msClusterResource struct {
|
||||
Name string
|
||||
Type string
|
||||
OwnerGroup string
|
||||
OwnerNode string
|
||||
|
||||
Characteristics uint
|
||||
DeadlockTimeout uint
|
||||
EmbeddedFailureAction uint
|
||||
Flags uint
|
||||
IsAlivePollInterval uint
|
||||
LooksAlivePollInterval uint
|
||||
MonitorProcessId uint
|
||||
PendingTimeout uint
|
||||
ResourceClass uint
|
||||
RestartAction uint
|
||||
RestartDelay uint
|
||||
RestartPeriod uint
|
||||
RestartThreshold uint
|
||||
RetryPeriodOnFailure uint
|
||||
State uint
|
||||
Subclass uint
|
||||
}
|
||||
|
||||
func (c *Collector) buildResource() {
|
||||
c.resourceCharacteristics = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "characteristics"),
|
||||
"Provides the characteristics of the object.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceDeadlockTimeout = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "deadlock_timeout"),
|
||||
"Indicates the length of time to wait, in milliseconds, before declaring a deadlock in any call into a resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceEmbeddedFailureAction = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "embedded_failure_action"),
|
||||
"The time, in milliseconds, that a resource should remain in a failed state before the Cluster service attempts to restart it.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceFlags = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "flags"),
|
||||
"Provides access to the flags set for the object.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceIsAlivePollInterval = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "is_alive_poll_interval"),
|
||||
"Provides access to the resource's IsAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it is operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the IsAlivePollInterval property for the resource type associated with the resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceLooksAlivePollInterval = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "looks_alive_poll_interval"),
|
||||
"Provides access to the resource's LooksAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it appears operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the LooksAlivePollInterval property for the resource type associated with the resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceMonitorProcessId = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "monitor_process_id"),
|
||||
"Provides the process ID of the resource host service that is currently hosting the resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceOwnerNode = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "owner_node"),
|
||||
"The node hosting the resource. 0: Not hosted; 1: Hosted",
|
||||
[]string{"type", "owner_group", "node_name", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourcePendingTimeout = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "pending_timeout"),
|
||||
"Provides access to the resource's PendingTimeout property. If a resource cannot be brought online or taken offline in the number of milliseconds specified by the PendingTimeout property, the resource is forcibly terminated.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceResourceClass = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "resource_class"),
|
||||
"Gets or sets the resource class of a resource. 0: Unknown; 1: Storage; 2: Network; 32768: Unknown ",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceRestartAction = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "restart_action"),
|
||||
"Provides access to the resource's RestartAction property, which is the action to be taken by the Cluster Service if the resource fails.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceRestartDelay = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "restart_delay"),
|
||||
"Indicates the time delay before a failed resource is restarted.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceRestartPeriod = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "restart_period"),
|
||||
"Provides access to the resource's RestartPeriod property, which is interval of time, in milliseconds, during which a specified number of restart attempts can be made on a nonresponsive resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceRestartThreshold = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "restart_threshold"),
|
||||
"Provides access to the resource's RestartThreshold property which is the maximum number of restart attempts that can be made on a resource within an interval defined by the RestartPeriod property before the Cluster Service initiates the action specified by the RestartAction property.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceRetryPeriodOnFailure = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "retry_period_on_failure"),
|
||||
"Provides access to the resource's RetryPeriodOnFailure property, which is the interval of time (in milliseconds) that a resource should remain in a failed state before the Cluster service attempts to restart it.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceState = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "state"),
|
||||
"The current state of the resource. -1: Unknown; 0: Inherited; 1: Initializing; 2: Online; 3: Offline; 4: Failed; 128: Pending; 129: Online Pending; 130: Offline Pending ",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceSubClass = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResource, "subclass"),
|
||||
"Provides the list of references to nodes that can be the owner of this resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) collectResource(ch chan<- prometheus.Metric, nodeNames []string) error {
|
||||
var dst []msClusterResource
|
||||
|
||||
if err := c.wmiClient.Query("SELECT * FROM MSCluster_Resource", &dst, nil, "root/MSCluster"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceCharacteristics,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Characteristics),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceDeadlockTimeout,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.DeadlockTimeout),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceEmbeddedFailureAction,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.EmbeddedFailureAction),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceFlags,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Flags),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceIsAlivePollInterval,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.IsAlivePollInterval),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceLooksAlivePollInterval,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.LooksAlivePollInterval),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceMonitorProcessId,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.MonitorProcessId),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
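// Emit one series per cluster node: 1 for the node that currently owns the resource, 0 for every other node.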
for _, nodeName := range nodeNames {
|
||||
isCurrentState := 0.0
|
||||
if v.OwnerNode == nodeName {
|
||||
isCurrentState = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceOwnerNode,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentState,
|
||||
v.Type, v.OwnerGroup, nodeName, v.Name,
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourcePendingTimeout,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.PendingTimeout),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceResourceClass,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.ResourceClass),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceRestartAction,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RestartAction),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceRestartDelay,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RestartDelay),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceRestartPeriod,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RestartPeriod),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceRestartThreshold,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RestartThreshold),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceRetryPeriodOnFailure,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RetryPeriodOnFailure),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceState,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.State),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceSubClass,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Subclass),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,240 +0,0 @@
|
||||
package mscluster
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const nameResourceGroup = Name + "_resourcegroup"
|
||||
|
||||
// msClusterResourceGroup represents the MSCluster_ResourceGroup WMI class
|
||||
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resourcegroup
|
||||
type msClusterResourceGroup struct {
|
||||
Name string
|
||||
|
||||
AutoFailbackType uint
|
||||
Characteristics uint
|
||||
ColdStartSetting uint
|
||||
DefaultOwner uint
|
||||
FailbackWindowEnd int
|
||||
FailbackWindowStart int
|
||||
FailoverPeriod uint
|
||||
FailoverThreshold uint
|
||||
Flags uint
|
||||
GroupType uint
|
||||
OwnerNode string
|
||||
Priority uint
|
||||
ResiliencyPeriod uint
|
||||
State uint
|
||||
}
|
||||
|
||||
func (c *Collector) buildResourceGroup() {
|
||||
c.resourceGroupAutoFailbackType = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "auto_failback_type"),
|
||||
"Provides access to the group's AutoFailbackType property.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupCharacteristics = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "characteristics"),
|
||||
"Provides the characteristics of the group.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupColdStartSetting = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "cold_start_setting"),
|
||||
"Indicates whether a group can start after a cluster cold start.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupDefaultOwner = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "default_owner"),
|
||||
"Number of the last node the resource group was activated on or explicitly moved to.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupFailbackWindowEnd = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "failback_window_end"),
|
||||
"The FailbackWindowEnd property provides the latest time that the group can be moved back to the node identified as its preferred node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupFailbackWindowStart = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "failback_window_start"),
|
||||
"The FailbackWindowStart property provides the earliest time (that is, local time as kept by the cluster) that the group can be moved back to the node identified as its preferred node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupFailOverPeriod = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "failover_period"),
|
||||
"The FailoverPeriod property specifies a number of hours during which a maximum number of failover attempts, specified by the FailoverThreshold property, can occur.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupFailOverThreshold = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "failover_threshold"),
|
||||
"The FailoverThreshold property specifies the maximum number of failover attempts.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupFlags = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "flags"),
|
||||
"Provides access to the flags set for the group. ",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupGroupType = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "group_type"),
|
||||
"The Type of the resource group.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupOwnerNode = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "owner_node"),
|
||||
"The node hosting the resource group. 0: Not hosted; 1: Hosted",
|
||||
[]string{"node_name", "name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupPriority = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "priority"),
|
||||
"Priority value of the resource group",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupResiliencyPeriod = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "resiliency_period"),
|
||||
"The resiliency period for this group, in seconds.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.resourceGroupState = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, nameResourceGroup, "state"),
|
||||
"The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) collectResourceGroup(ch chan<- prometheus.Metric, nodeNames []string) error {
|
||||
var dst []msClusterResourceGroup
|
||||
|
||||
if err := c.wmiClient.Query("SELECT * FROM MSCluster_ResourceGroup", &dst, nil, "root/MSCluster"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupAutoFailbackType,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.AutoFailbackType),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupCharacteristics,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Characteristics),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupColdStartSetting,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.ColdStartSetting),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupDefaultOwner,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.DefaultOwner),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupFailbackWindowEnd,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.FailbackWindowEnd),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupFailbackWindowStart,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.FailbackWindowStart),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupFailOverPeriod,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.FailoverPeriod),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupFailOverThreshold,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.FailoverThreshold),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupFlags,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Flags),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupGroupType,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.GroupType),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
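// Emit one series per cluster node: 1 for the node that currently owns the resource group, 0 for every other node.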
for _, nodeName := range nodeNames {
|
||||
isCurrentState := 0.0
|
||||
if v.OwnerNode == nodeName {
|
||||
isCurrentState = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupOwnerNode,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentState,
|
||||
nodeName, v.Name,
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupPriority,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Priority),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupResiliencyPeriod,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.ResiliencyPeriod),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.resourceGroupState,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.State),
|
||||
v.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,186 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package msmq
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/utils"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "msmq"
|
||||
|
||||
type Config struct {
|
||||
QueryWhereClause *string `yaml:"query_where_clause"`
|
||||
}
|
||||
|
||||
var ConfigDefaults = Config{
|
||||
QueryWhereClause: utils.ToPTR(""),
|
||||
}
|
||||
|
||||
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
wmiClient *wmi.Client
|
||||
|
||||
bytesInJournalQueue *prometheus.Desc
|
||||
bytesInQueue *prometheus.Desc
|
||||
messagesInJournalQueue *prometheus.Desc
|
||||
messagesInQueue *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
if config.QueryWhereClause == nil {
|
||||
config.QueryWhereClause = ConfigDefaults.QueryWhereClause
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(app *kingpin.Application) *Collector {
|
||||
c := &Collector{
|
||||
config: ConfigDefaults,
|
||||
}
|
||||
|
||||
app.Flag("collector.msmq.msmq-where", "WQL 'where' clause to use in WMI metrics query. "+
|
||||
"Limits the response to the msmqs you specify and reduces the size of the response.").
|
||||
Default(*c.config.QueryWhereClause).StringVar(c.config.QueryWhereClause)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(logger *slog.Logger, wmiClient *wmi.Client) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
|
||||
return errors.New("wmiClient or SWbemServicesClient is nil")
|
||||
}
|
||||
|
||||
c.wmiClient = wmiClient
|
||||
|
||||
if *c.config.QueryWhereClause == "" {
|
||||
logger.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
c.bytesInJournalQueue = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "bytes_in_journal_queue"),
|
||||
"Size of queue journal in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.bytesInQueue = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "bytes_in_queue"),
|
||||
"Size of queue in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.messagesInJournalQueue = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "messages_in_journal_queue"),
|
||||
"Count messages in queue journal",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
c.messagesInQueue = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "messages_in_queue"),
|
||||
"Count messages in queue",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ch); err != nil {
|
||||
logger.Error("failed collecting msmq metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type msmqQueue struct {
|
||||
Name string
|
||||
|
||||
BytesInJournalQueue uint64
|
||||
BytesInQueue uint64
|
||||
MessagesInJournalQueue uint64
|
||||
MessagesInQueue uint64
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
|
||||
var dst []msmqQueue
|
||||
|
||||
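// Append the optional user-supplied WHERE clause to narrow the WQL result set.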
query := "SELECT * FROM Win32_PerfRawData_MSMQ_MSMQQueue"
|
||||
if *c.config.QueryWhereClause != "" {
|
||||
query += " WHERE " + *c.config.QueryWhereClause
|
||||
}
|
||||
|
||||
if err := c.wmiClient.Query(query, &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, msmq := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesInJournalQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.BytesInJournalQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesInQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.BytesInQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.messagesInJournalQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.MessagesInJournalQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.messagesInQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.MessagesInQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
package msmq_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/msmq"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
// No context name required as Collector source is WMI
|
||||
testutils.FuncBenchmarkCollector(b, msmq.Name, msmq.NewWithFlags)
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,12 +0,0 @@
|
||||
package mssql_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/mssql"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, mssql.Name, mssql.NewWithFlags)
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
package net
|
||||
|
||||
const (
|
||||
BytesReceivedPerSec = "Bytes Received/sec"
|
||||
BytesSentPerSec = "Bytes Sent/sec"
|
||||
BytesTotalPerSec = "Bytes Total/sec"
|
||||
OutputQueueLength = "Output Queue Length"
|
||||
PacketsOutboundDiscarded = "Packets Outbound Discarded"
|
||||
PacketsOutboundErrors = "Packets Outbound Errors"
|
||||
PacketsPerSec = "Packets/sec"
|
||||
PacketsReceivedDiscarded = "Packets Received Discarded"
|
||||
PacketsReceivedErrors = "Packets Received Errors"
|
||||
PacketsReceivedPerSec = "Packets Received/sec"
|
||||
PacketsReceivedUnknown = "Packets Received Unknown"
|
||||
PacketsSentPerSec = "Packets Sent/sec"
|
||||
CurrentBandwidth = "Current Bandwidth"
|
||||
)
|
||||
|
||||
// Win32_PerfRawData_Tcpip_NetworkInterface docs:
|
||||
// - https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)
|
||||
type perflibNetworkInterface struct {
|
||||
BytesReceivedPerSec float64 `perflib:"Bytes Received/sec"`
|
||||
BytesSentPerSec float64 `perflib:"Bytes Sent/sec"`
|
||||
BytesTotalPerSec float64 `perflib:"Bytes Total/sec"`
|
||||
Name string
|
||||
OutputQueueLength float64 `perflib:"Output Queue Length"`
|
||||
PacketsOutboundDiscarded float64 `perflib:"Packets Outbound Discarded"`
|
||||
PacketsOutboundErrors float64 `perflib:"Packets Outbound Errors"`
|
||||
PacketsPerSec float64 `perflib:"Packets/sec"`
|
||||
PacketsReceivedDiscarded float64 `perflib:"Packets Received Discarded"`
|
||||
PacketsReceivedErrors float64 `perflib:"Packets Received Errors"`
|
||||
PacketsReceivedPerSec float64 `perflib:"Packets Received/sec"`
|
||||
PacketsReceivedUnknown float64 `perflib:"Packets Received Unknown"`
|
||||
PacketsSentPerSec float64 `perflib:"Packets Sent/sec"`
|
||||
CurrentBandwidth float64 `perflib:"Current Bandwidth"`
|
||||
}
|
||||
@@ -1,599 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perfdata"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/utils"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
const Name = "net"
|
||||
|
||||
type Config struct {
|
||||
NicExclude *regexp.Regexp `yaml:"nic_exclude"`
|
||||
NicInclude *regexp.Regexp `yaml:"nic_include"`
|
||||
CollectorsEnabled []string `yaml:"collectors_enabled"`
|
||||
}
|
||||
|
||||
var ConfigDefaults = Config{
|
||||
NicExclude: types.RegExpEmpty,
|
||||
NicInclude: types.RegExpAny,
|
||||
CollectorsEnabled: []string{
|
||||
"metrics",
|
||||
"nic_addresses",
|
||||
},
|
||||
}
|
||||
|
||||
// A Collector is a Prometheus Collector for Perflib Network Interface metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
perfDataCollector *perfdata.Collector
|
||||
|
||||
bytesReceivedTotal *prometheus.Desc
|
||||
bytesSentTotal *prometheus.Desc
|
||||
bytesTotal *prometheus.Desc
|
||||
outputQueueLength *prometheus.Desc
|
||||
packetsOutboundDiscarded *prometheus.Desc
|
||||
packetsOutboundErrors *prometheus.Desc
|
||||
packetsTotal *prometheus.Desc
|
||||
packetsReceivedDiscarded *prometheus.Desc
|
||||
packetsReceivedErrors *prometheus.Desc
|
||||
packetsReceivedTotal *prometheus.Desc
|
||||
packetsReceivedUnknown *prometheus.Desc
|
||||
packetsSentTotal *prometheus.Desc
|
||||
currentBandwidth *prometheus.Desc
|
||||
|
||||
nicAddressInfo *prometheus.Desc
|
||||
routeInfo *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
if config.NicExclude == nil {
|
||||
config.NicExclude = ConfigDefaults.NicExclude
|
||||
}
|
||||
|
||||
if config.NicInclude == nil {
|
||||
config.NicInclude = ConfigDefaults.NicInclude
|
||||
}
|
||||
|
||||
if config.CollectorsEnabled == nil {
|
||||
config.CollectorsEnabled = ConfigDefaults.CollectorsEnabled
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(app *kingpin.Application) *Collector {
|
||||
c := &Collector{
|
||||
config: ConfigDefaults,
|
||||
}
|
||||
c.config.CollectorsEnabled = make([]string, 0)
|
||||
|
||||
var nicExclude, nicInclude string
|
||||
|
||||
var collectorsEnabled string
|
||||
|
||||
app.Flag(
|
||||
"collector.net.nic-exclude",
|
||||
"Regexp of NIC:s to exclude. NIC name must both match include and not match exclude to be included.",
|
||||
).Default(c.config.NicExclude.String()).StringVar(&nicExclude)
|
||||
|
||||
app.Flag(
|
||||
"collector.net.nic-include",
|
||||
"Regexp of NIC:s to include. NIC name must both match include and not match exclude to be included.",
|
||||
).Default(c.config.NicInclude.String()).StringVar(&nicInclude)
|
||||
|
||||
app.Flag(
|
||||
"collector.net.enabled",
|
||||
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
|
||||
).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
|
||||
|
||||
app.Action(func(*kingpin.ParseContext) error {
|
||||
c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")
|
||||
|
||||
var err error
|
||||
|
||||
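// Anchor the user-supplied patterns so they must match the full NIC name.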
c.config.NicExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", nicExclude))
|
||||
if err != nil {
|
||||
return fmt.Errorf("collector.net.nic-exclude: %w", err)
|
||||
}
|
||||
|
||||
c.config.NicInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", nicInclude))
|
||||
if err != nil {
|
||||
return fmt.Errorf("collector.net.nic-include: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
if utils.PDHEnabled() {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return []string{"Network Interface"}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
|
||||
if utils.PDHEnabled() {
|
||||
counters := []string{
|
||||
BytesReceivedPerSec,
|
||||
BytesSentPerSec,
|
||||
BytesTotalPerSec,
|
||||
OutputQueueLength,
|
||||
PacketsOutboundDiscarded,
|
||||
PacketsOutboundErrors,
|
||||
PacketsPerSec,
|
||||
PacketsReceivedDiscarded,
|
||||
PacketsReceivedErrors,
|
||||
PacketsReceivedPerSec,
|
||||
PacketsReceivedUnknown,
|
||||
PacketsSentPerSec,
|
||||
CurrentBandwidth,
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
c.perfDataCollector, err = perfdata.NewCollector("Network Interface", []string{"*"}, counters)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Processor Information collector: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "addresses") {
|
||||
logger.Info("nic/addresses collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.",
|
||||
slog.String("collector", Name),
|
||||
)
|
||||
}
|
||||
|
||||
c.bytesReceivedTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "bytes_received_total"),
|
||||
"(Network.BytesReceivedPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.bytesSentTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "bytes_sent_total"),
|
||||
"(Network.BytesSentPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.bytesTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "bytes_total"),
|
||||
"(Network.BytesTotalPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.outputQueueLength = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "output_queue_length_packets"),
|
||||
"(Network.OutputQueueLength)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.packetsOutboundDiscarded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_outbound_discarded_total"),
|
||||
"(Network.PacketsOutboundDiscarded)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.packetsOutboundErrors = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_outbound_errors_total"),
|
||||
"(Network.PacketsOutboundErrors)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.packetsReceivedDiscarded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_received_discarded_total"),
|
||||
"(Network.PacketsReceivedDiscarded)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.packetsReceivedErrors = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_received_errors_total"),
|
||||
"(Network.PacketsReceivedErrors)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.packetsReceivedTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
|
||||
"(Network.PacketsReceivedPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.packetsReceivedUnknown = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_received_unknown_total"),
|
||||
"(Network.PacketsReceivedUnknown)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.packetsTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_total"),
|
||||
"(Network.PacketsPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.packetsSentTotal = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "packets_sent_total"),
|
||||
"(Network.PacketsSentPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.currentBandwidth = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "current_bandwidth_bytes"),
|
||||
"(Network.CurrentBandwidth)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
)
|
||||
c.nicAddressInfo = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "nic_address_info"),
|
||||
"A metric with a constant '1' value labeled with the network interface's address information.",
|
||||
[]string{"nic", "friendly_name", "address", "family"},
|
||||
nil,
|
||||
)
|
||||
c.routeInfo = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "route_info"),
|
||||
"A metric with a constant '1' value labeled with the network interface's route information.",
|
||||
[]string{"nic", "src", "dest", "metric"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "metrics") {
|
||||
var err error
|
||||
|
||||
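// Use the PDH-based collector when enabled; otherwise fall back to the perflib snapshot carried in the scrape context.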
if utils.PDHEnabled() {
|
||||
err = c.collectPDH(ch)
|
||||
} else {
|
||||
err = c.collect(ctx, logger, ch)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed collecting net metrics: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, "nic_addresses") {
|
||||
if err := c.collectNICAddresses(ch); err != nil {
|
||||
return fmt.Errorf("failed collecting net addresses: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
var dst []perflibNetworkInterface
|
||||
|
||||
if err := perflib.UnmarshalObject(ctx.PerfObjects["Network Interface"], &dst, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, nic := range dst {
|
||||
if c.config.NicExclude.MatchString(nic.Name) ||
|
||||
!c.config.NicInclude.MatchString(nic.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Counters
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
nic.BytesReceivedPerSec,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesSentTotal,
|
||||
prometheus.CounterValue,
|
||||
nic.BytesSentPerSec,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesTotal,
|
||||
prometheus.CounterValue,
|
||||
nic.BytesTotalPerSec,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.outputQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
nic.OutputQueueLength,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsOutboundDiscarded,
|
||||
prometheus.CounterValue,
|
||||
nic.PacketsOutboundDiscarded,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsOutboundErrors,
|
||||
prometheus.CounterValue,
|
||||
nic.PacketsOutboundErrors,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsTotal,
|
||||
prometheus.CounterValue,
|
||||
nic.PacketsPerSec,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceivedDiscarded,
|
||||
prometheus.CounterValue,
|
||||
nic.PacketsReceivedDiscarded,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceivedErrors,
|
||||
prometheus.CounterValue,
|
||||
nic.PacketsReceivedErrors,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
nic.PacketsReceivedPerSec,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceivedUnknown,
|
||||
prometheus.CounterValue,
|
||||
nic.PacketsReceivedUnknown,
|
||||
nic.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsSentTotal,
|
||||
prometheus.CounterValue,
|
||||
nic.PacketsSentPerSec,
|
||||
nic.Name,
|
||||
)
|
||||
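// CurrentBandwidth is reported in bits per second; divide by 8 so the metric is exposed in bytes per second.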
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentBandwidth,
|
||||
prometheus.GaugeValue,
|
||||
nic.CurrentBandwidth/8,
|
||||
nic.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
|
||||
data, err := c.perfDataCollector.Collect()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect Network Information metrics: %w", err)
|
||||
}
|
||||
|
||||
for nicName, nicData := range data {
|
||||
if c.config.NicExclude.MatchString(nicName) ||
|
||||
!c.config.NicInclude.MatchString(nicName) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Counters
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
nicData[BytesReceivedPerSec].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesSentTotal,
|
||||
prometheus.CounterValue,
|
||||
nicData[BytesSentPerSec].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesTotal,
|
||||
prometheus.CounterValue,
|
||||
nicData[BytesTotalPerSec].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.outputQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
nicData[OutputQueueLength].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsOutboundDiscarded,
|
||||
prometheus.CounterValue,
|
||||
nicData[PacketsOutboundDiscarded].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsOutboundErrors,
|
||||
prometheus.CounterValue,
|
||||
nicData[PacketsOutboundErrors].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsTotal,
|
||||
prometheus.CounterValue,
|
||||
nicData[PacketsPerSec].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceivedDiscarded,
|
||||
prometheus.CounterValue,
|
||||
nicData[PacketsReceivedDiscarded].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceivedErrors,
|
||||
prometheus.CounterValue,
|
||||
nicData[PacketsReceivedErrors].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
nicData[PacketsReceivedPerSec].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsReceivedUnknown,
|
||||
prometheus.CounterValue,
|
||||
nicData[PacketsReceivedUnknown].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.packetsSentTotal,
|
||||
prometheus.CounterValue,
|
||||
nicData[PacketsSentPerSec].FirstValue,
|
||||
nicName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentBandwidth,
|
||||
prometheus.GaugeValue,
|
||||
nicData[CurrentBandwidth].FirstValue/8,
|
||||
nicName,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var addressFamily = map[uint16]string{
|
||||
windows.AF_INET: "ipv4",
|
||||
windows.AF_INET6: "ipv6",
|
||||
}
|
||||
|
||||
func (c *Collector) collectNICAddresses(ch chan<- prometheus.Metric) error {
|
||||
nicAdapterAddresses, err := adapterAddresses()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
convertNicName := strings.NewReplacer("(", "[", ")", "]")
|
||||
|
||||
for _, nicAdapterAddress := range nicAdapterAddresses {
|
||||
friendlyName := windows.UTF16PtrToString(nicAdapterAddress.FriendlyName)
|
||||
nicName := windows.UTF16PtrToString(nicAdapterAddress.Description)
|
||||
|
||||
if c.config.NicExclude.MatchString(nicName) ||
|
||||
!c.config.NicInclude.MatchString(nicName) {
|
||||
continue
|
||||
}
|
||||
|
||||
for address := nicAdapterAddress.FirstUnicastAddress; address != nil; address = address.Next {
|
||||
ipAddr := address.Address.IP()
|
||||
|
||||
if ipAddr == nil || !ipAddr.IsGlobalUnicast() {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nicAddressInfo,
|
||||
prometheus.GaugeValue,
|
||||
1,
|
||||
convertNicName.Replace(nicName),
|
||||
friendlyName,
|
||||
ipAddr.String(),
|
||||
addressFamily[address.Address.Sockaddr.Addr.Family],
|
||||
)
|
||||
}
|
||||
|
||||
for address := nicAdapterAddress.FirstAnycastAddress; address != nil; address = address.Next {
|
||||
ipAddr := address.Address.IP()
|
||||
|
||||
if ipAddr == nil || !ipAddr.IsGlobalUnicast() {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.nicAddressInfo,
|
||||
prometheus.GaugeValue,
|
||||
1,
|
||||
convertNicName.Replace(nicName),
|
||||
friendlyName,
|
||||
ipAddr.String(),
|
||||
addressFamily[address.Address.Sockaddr.Addr.Family],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// adapterAddresses returns a list of IP adapter and address
|
||||
// structures. The structure contains an IP adapter and flattened
|
||||
// multiple IP addresses including unicast, anycast and multicast
|
||||
// addresses.
|
||||
func adapterAddresses() ([]*windows.IpAdapterAddresses, error) {
|
||||
var b []byte
|
||||
|
||||
l := uint32(15000) // recommended initial size
|
||||
|
||||
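// Retry with a growing buffer while Windows reports ERROR_BUFFER_OVERFLOW; the required size is returned in l.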
for {
|
||||
b = make([]byte, l)
|
||||
|
||||
const flags = windows.GAA_FLAG_SKIP_MULTICAST | windows.GAA_FLAG_SKIP_DNS_SERVER
|
||||
|
||||
err := windows.GetAdaptersAddresses(windows.AF_UNSPEC, flags, 0, (*windows.IpAdapterAddresses)(unsafe.Pointer(&b[0])), &l)
|
||||
if err == nil {
|
||||
if l == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
if !errors.Is(err, windows.ERROR_BUFFER_OVERFLOW) {
|
||||
return nil, os.NewSyscallError("getadaptersaddresses", err)
|
||||
}
|
||||
|
||||
if l <= uint32(len(b)) {
|
||||
return nil, os.NewSyscallError("getadaptersaddresses", err)
|
||||
}
|
||||
}
|
||||
|
||||
var addresses []*windows.IpAdapterAddresses
|
||||
for address := (*windows.IpAdapterAddresses)(unsafe.Pointer(&b[0])); address != nil; address = address.Next {
|
||||
addresses = append(addresses, address)
|
||||
}
|
||||
|
||||
return addresses, nil
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package net_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/net"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
// NicInclude is not set in testing context (kingpin flags not parsed), causing the collector to skip all interfaces.
|
||||
localNicInclude := ".+"
|
||||
|
||||
kingpin.CommandLine.GetArg("collector.net.nic-include").StringVar(&localNicInclude)
|
||||
testutils.FuncBenchmarkCollector(b, net.Name, net.NewWithFlags)
|
||||
}
|
||||
@@ -1,245 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package netframework
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"slices"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "netframework"
|
||||
|
||||
type Config struct {
|
||||
CollectorsEnabled []string `yaml:"collectors_enabled"`
|
||||
}
|
||||
|
||||
var ConfigDefaults = Config{
|
||||
CollectorsEnabled: []string{
|
||||
collectorClrExceptions,
|
||||
collectorClrInterop,
|
||||
collectorClrJIT,
|
||||
collectorClrLoading,
|
||||
collectorClrLocksAndThreads,
|
||||
collectorClrMemory,
|
||||
collectorClrRemoting,
|
||||
collectorClrSecurity,
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
collectorClrExceptions = "clrexceptions"
|
||||
collectorClrInterop = "clrinterop"
|
||||
collectorClrJIT = "clrjit"
|
||||
collectorClrLoading = "clrloading"
|
||||
collectorClrLocksAndThreads = "clrlocksandthreads"
|
||||
collectorClrMemory = "clrmemory"
|
||||
collectorClrRemoting = "clrremoting"
|
||||
collectorClrSecurity = "clrsecurity"
|
||||
)
|
||||
|
||||
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
wmiClient *wmi.Client
|
||||
|
||||
// clrexceptions
|
||||
numberOfExceptionsThrown *prometheus.Desc
|
||||
numberOfFilters *prometheus.Desc
|
||||
numberOfFinally *prometheus.Desc
|
||||
throwToCatchDepth *prometheus.Desc
|
||||
|
||||
// clrinterop
|
||||
numberOfCCWs *prometheus.Desc
|
||||
numberOfMarshalling *prometheus.Desc
|
||||
numberOfStubs *prometheus.Desc
|
||||
|
||||
// clrjit
|
||||
numberOfMethodsJitted *prometheus.Desc
|
||||
timeInJit *prometheus.Desc
|
||||
standardJitFailures *prometheus.Desc
|
||||
totalNumberOfILBytesJitted *prometheus.Desc
|
||||
|
||||
// clrloading
|
||||
bytesInLoaderHeap *prometheus.Desc
|
||||
currentAppDomains *prometheus.Desc
|
||||
currentAssemblies *prometheus.Desc
|
||||
currentClassesLoaded *prometheus.Desc
|
||||
totalAppDomains *prometheus.Desc
|
||||
totalAppDomainsUnloaded *prometheus.Desc
|
||||
totalAssemblies *prometheus.Desc
|
||||
totalClassesLoaded *prometheus.Desc
|
||||
totalNumberOfLoadFailures *prometheus.Desc
|
||||
|
||||
// clrlocksandthreads
|
||||
currentQueueLength *prometheus.Desc
|
||||
numberOfCurrentLogicalThreads *prometheus.Desc
|
||||
numberOfCurrentPhysicalThreads *prometheus.Desc
|
||||
numberOfCurrentRecognizedThreads *prometheus.Desc
|
||||
numberOfTotalRecognizedThreads *prometheus.Desc
|
||||
queueLengthPeak *prometheus.Desc
|
||||
totalNumberOfContentions *prometheus.Desc
|
||||
|
||||
// clrmemory
|
||||
allocatedBytes *prometheus.Desc
|
||||
finalizationSurvivors *prometheus.Desc
|
||||
heapSize *prometheus.Desc
|
||||
promotedBytes *prometheus.Desc
|
||||
numberGCHandles *prometheus.Desc
|
||||
numberCollections *prometheus.Desc
|
||||
numberInducedGC *prometheus.Desc
|
||||
numberOfPinnedObjects *prometheus.Desc
|
||||
numberOfSinkBlocksInUse *prometheus.Desc
|
||||
numberTotalCommittedBytes *prometheus.Desc
|
||||
numberTotalReservedBytes *prometheus.Desc
|
||||
timeInGC *prometheus.Desc
|
||||
|
||||
// clrremoting
|
||||
channels *prometheus.Desc
|
||||
contextBoundClassesLoaded *prometheus.Desc
|
||||
contextBoundObjects *prometheus.Desc
|
||||
contextProxies *prometheus.Desc
|
||||
contexts *prometheus.Desc
|
||||
totalRemoteCalls *prometheus.Desc
|
||||
|
||||
// clrsecurity
|
||||
numberLinkTimeChecks *prometheus.Desc
|
||||
timeInRTChecks *prometheus.Desc
|
||||
stackWalkDepth *prometheus.Desc
|
||||
totalRuntimeChecks *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
|
||||
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
|
||||
return errors.New("wmiClient or SWbemServicesClient is nil")
|
||||
}
|
||||
|
||||
c.wmiClient = wmiClient
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrExceptions) {
|
||||
c.buildClrExceptions()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrInterop) {
|
||||
c.buildClrInterop()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrJIT) {
|
||||
c.buildClrJIT()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrLoading) {
|
||||
c.buildClrLoading()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrLocksAndThreads) {
|
||||
c.buildClrLocksAndThreads()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrMemory) {
|
||||
c.buildClrMemory()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrRemoting) {
|
||||
c.buildClrRemoting()
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrSecurity) {
|
||||
c.buildClrSecurity()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
var (
|
||||
err error
|
||||
errs []error
|
||||
)
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrExceptions) {
|
||||
if err = c.collectClrExceptions(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrExceptions, err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrInterop) {
|
||||
if err = c.collectClrInterop(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrInterop, err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrJIT) {
|
||||
if err = c.collectClrJIT(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrJIT, err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrLoading) {
|
||||
if err = c.collectClrLoading(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrLoading, err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrLocksAndThreads) {
|
||||
if err = c.collectClrLocksAndThreads(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrLocksAndThreads, err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrMemory) {
|
||||
if err = c.collectClrMemory(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrMemory, err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrRemoting) {
|
||||
if err = c.collectClrRemoting(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrRemoting, err))
|
||||
}
|
||||
}
|
||||
|
||||
if slices.Contains(c.config.CollectorsEnabled, collectorClrSecurity) {
|
||||
if err = c.collectClrSecurity(ch); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to collect %s metrics: %w", collectorClrSecurity, err))
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
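A short construction sketch (assumed usage, not taken from this diff; the import path matches the pkg/ layout being removed here): Config.CollectorsEnabled restricts which CLR sub-collectors Build registers and Collect queries.

package main

import "github.com/prometheus-community/windows_exporter/pkg/collector/netframework"

func main() {
	// Enable only the exception and memory sub-collectors; the string values
	// match the collectorClr* constants defined above.
	c := netframework.New(&netframework.Config{
		CollectorsEnabled: []string{"clrexceptions", "clrmemory"},
	})

	_ = c // Build and Collect are driven by the exporter's collector framework.
}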
@@ -1,88 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package netframework
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func (c *Collector) buildClrExceptions() {
|
||||
c.numberOfExceptionsThrown = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "exceptions_thrown_total"),
|
||||
"Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfFilters = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "exceptions_filters_total"),
|
||||
"Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfFinally = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "exceptions_finallys_total"),
|
||||
"Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.throwToCatchDepth = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "throw_to_catch_depth_total"),
|
||||
"Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
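// Field names deliberately mirror the WMI class properties; the wmi package maps query columns onto struct fields by name.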
type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {
|
||||
Name string
|
||||
|
||||
NumberofExcepsThrown uint32
|
||||
NumberofExcepsThrownPersec uint32
|
||||
NumberofFiltersPersec uint32
|
||||
NumberofFinallysPersec uint32
|
||||
ThrowToCatchDepthPersec uint32
|
||||
}
|
||||
|
||||
func (c *Collector) collectClrExceptions(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRExceptions", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
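// The _Global_ instance aggregates the counters of every process; skip it to avoid double counting the per-process metrics.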
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfExceptionsThrown,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofExcepsThrown),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfFilters,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofFiltersPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfFinally,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofFinallysPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.throwToCatchDepth,
|
||||
prometheus.CounterValue,
|
||||
float64(process.ThrowToCatchDepthPersec),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,75 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package netframework
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func (c *Collector) buildClrInterop() {
|
||||
c.numberOfCCWs = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "com_callable_wrappers_total"),
|
||||
"Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfMarshalling = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "interop_marshalling_total"),
|
||||
"Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfStubs = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "interop_stubs_created_total"),
|
||||
"Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRInterop struct {
|
||||
Name string
|
||||
|
||||
NumberofCCWs uint32
|
||||
Numberofmarshalling uint32
|
||||
NumberofStubs uint32
|
||||
NumberofTLBexportsPersec uint32
|
||||
NumberofTLBimportsPersec uint32
|
||||
}
|
||||
|
||||
func (c *Collector) collectClrInterop(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRInterop", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfCCWs,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofCCWs),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfMarshalling,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Numberofmarshalling),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfStubs,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofStubs),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package netframework
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func (c *Collector) buildClrJIT() {
|
||||
c.numberOfMethodsJitted = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "jit_methods_total"),
|
||||
"Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.timeInJit = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "jit_time_percent"),
|
||||
"Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.standardJitFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "jit_standard_failures_total"),
|
||||
"Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalNumberOfILBytesJitted = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "jit_il_bytes_total"),
|
||||
"Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRJit struct {
|
||||
Name string
|
||||
|
||||
Frequency_PerfTime uint32
|
||||
ILBytesJittedPersec uint32
|
||||
NumberofILBytesJitted uint32
|
||||
NumberofMethodsJitted uint32
|
||||
PercentTimeinJit uint32
|
||||
StandardJitFailures uint32
|
||||
TotalNumberofILBytesJitted uint32
|
||||
}
|
||||
|
||||
func (c *Collector) collectClrJIT(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRJit
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRJit", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfMethodsJitted,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofMethodsJitted),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeInJit,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PercentTimeinJit)/float64(process.Frequency_PerfTime),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.standardJitFailures,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.StandardJitFailures),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalNumberOfILBytesJitted,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalNumberofILBytesJitted),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,164 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package netframework
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func (c *Collector) buildClrLoading() {
|
||||
c.bytesInLoaderHeap = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "loader_heap_size_bytes"),
|
||||
"Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.currentAppDomains = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "appdomains_loaded_current"),
|
||||
"Displays the current number of application domains loaded in this application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.currentAssemblies = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "assemblies_loaded_current"),
|
||||
"Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.currentClassesLoaded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "classes_loaded_current"),
|
||||
"Displays the current number of classes loaded in all assemblies.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalAppDomains = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "appdomains_loaded_total"),
|
||||
"Displays the peak number of application domains loaded since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalAppDomainsUnloaded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "appdomains_unloaded_total"),
|
||||
"Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalAssemblies = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "assemblies_loaded_total"),
|
||||
"Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalClassesLoaded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "classes_loaded_total"),
|
||||
"Displays the cumulative number of classes loaded in all assemblies since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalNumberOfLoadFailures = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "class_load_failures_total"),
|
||||
"Displays the peak number of classes that have failed to load since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRLoading struct {
|
||||
Name string
|
||||
|
||||
AssemblySearchLength uint32
|
||||
BytesinLoaderHeap uint64
|
||||
Currentappdomains uint32
|
||||
CurrentAssemblies uint32
|
||||
CurrentClassesLoaded uint32
|
||||
PercentTimeLoading uint64
|
||||
Rateofappdomains uint32
|
||||
Rateofappdomainsunloaded uint32
|
||||
RateofAssemblies uint32
|
||||
RateofClassesLoaded uint32
|
||||
RateofLoadFailures uint32
|
||||
TotalAppdomains uint32
|
||||
Totalappdomainsunloaded uint32
|
||||
TotalAssemblies uint32
|
||||
TotalClassesLoaded uint32
|
||||
TotalNumberofLoadFailures uint32
|
||||
}
|
||||
|
||||
func (c *Collector) collectClrLoading(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRLoading
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRLoading", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.bytesInLoaderHeap,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.BytesinLoaderHeap),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentAppDomains,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Currentappdomains),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentAssemblies,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.CurrentAssemblies),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentClassesLoaded,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.CurrentClassesLoaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalAppDomains,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalAppdomains),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalAppDomainsUnloaded,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Totalappdomainsunloaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalAssemblies,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalAssemblies),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalClassesLoaded,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalClassesLoaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalNumberOfLoadFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalNumberofLoadFailures),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,132 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package netframework
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func (c *Collector) buildClrLocksAndThreads() {
|
||||
c.currentQueueLength = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "current_queue_length"),
|
||||
"Displays the total number of threads that are currently waiting to acquire a managed lock in the application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfCurrentLogicalThreads = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "current_logical_threads"),
|
||||
"Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads. ",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfCurrentPhysicalThreads = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "physical_threads_current"),
|
||||
"Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfCurrentRecognizedThreads = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "recognized_threads_current"),
|
||||
"Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfTotalRecognizedThreads = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "recognized_threads_total"),
|
||||
"Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.queueLengthPeak = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "queue_length_total"),
|
||||
"Displays the total number of threads that waited to acquire a managed lock since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalNumberOfContentions = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "contentions_total"),
|
||||
"Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads struct {
|
||||
Name string
|
||||
|
||||
ContentionRatePersec uint32
|
||||
CurrentQueueLength uint32
|
||||
NumberofcurrentlogicalThreads uint32
|
||||
NumberofcurrentphysicalThreads uint32
|
||||
Numberofcurrentrecognizedthreads uint32
|
||||
Numberoftotalrecognizedthreads uint32
|
||||
QueueLengthPeak uint32
|
||||
QueueLengthPersec uint32
|
||||
RateOfRecognizedThreadsPersec uint32
|
||||
TotalNumberofContentions uint32
|
||||
}
|
||||
|
||||
func (c *Collector) collectClrLocksAndThreads(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.CurrentQueueLength),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfCurrentLogicalThreads,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofcurrentlogicalThreads),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfCurrentPhysicalThreads,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofcurrentphysicalThreads),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfCurrentRecognizedThreads,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Numberofcurrentrecognizedthreads),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfTotalRecognizedThreads,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Numberoftotalrecognizedthreads),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.queueLengthPeak,
|
||||
prometheus.CounterValue,
|
||||
float64(process.QueueLengthPeak),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalNumberOfContentions,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalNumberofContentions),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,267 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package netframework
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func (c *Collector) buildClrMemory() {
|
||||
c.allocatedBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "allocated_bytes_total"),
|
||||
"Displays the total number of bytes allocated on the garbage collection heap.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.finalizationSurvivors = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "finalization_survivors"),
|
||||
"Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.heapSize = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "heap_size_bytes"),
|
||||
"Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated.",
|
||||
[]string{"process", "area"},
|
||||
nil,
|
||||
)
|
||||
c.promotedBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "promoted_bytes"),
|
||||
"Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection.",
|
||||
[]string{"process", "area"},
|
||||
nil,
|
||||
)
|
||||
c.numberGCHandles = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "number_gc_handles"),
|
||||
"Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberCollections = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "collections_total"),
|
||||
"Displays the number of times the generation objects are garbage collected since the application started.",
|
||||
[]string{"process", "area"},
|
||||
nil,
|
||||
)
|
||||
c.numberInducedGC = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "induced_gc_total"),
|
||||
"Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfPinnedObjects = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "number_pinned_objects"),
|
||||
"Displays the number of pinned objects encountered in the last garbage collection.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberOfSinkBlocksInUse = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "number_sink_blocksinuse"),
|
||||
"Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberTotalCommittedBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "committed_bytes"),
|
||||
"Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.numberTotalReservedBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "reserved_bytes"),
|
||||
"Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.timeInGC = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "gc_time_percent"),
|
||||
"Displays the percentage of time that was spent performing a garbage collection in the last sample.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRMemory struct {
|
||||
Name string
|
||||
|
||||
AllocatedBytesPersec uint64
|
||||
FinalizationSurvivors uint64
|
||||
Frequency_PerfTime uint64
|
||||
Gen0heapsize uint64
|
||||
Gen0PromotedBytesPerSec uint64
|
||||
Gen1heapsize uint64
|
||||
Gen1PromotedBytesPerSec uint64
|
||||
Gen2heapsize uint64
|
||||
LargeObjectHeapsize uint64
|
||||
NumberBytesinallHeaps uint64
|
||||
NumberGCHandles uint64
|
||||
NumberGen0Collections uint64
|
||||
NumberGen1Collections uint64
|
||||
NumberGen2Collections uint64
|
||||
NumberInducedGC uint64
|
||||
NumberofPinnedObjects uint64
|
||||
NumberofSinkBlocksinuse uint64
|
||||
NumberTotalcommittedBytes uint64
|
||||
NumberTotalreservedBytes uint64
|
||||
// PercentTimeinGC has countertype=PERF_RAW_FRACTION.
|
||||
// Formula: (100 * CounterValue) / BaseValue
|
||||
// By docs https://docs.microsoft.com/en-us/previous-versions/windows/internet-explorer/ie-developer/scripting-articles/ms974615(v=msdn.10)#perf_raw_fraction
|
||||
PercentTimeinGC uint32
|
||||
// PercentTimeinGC_base is the PERF_RAW_FRACTION base value, i.e. the denominator in the formula above.
|
||||
PercentTimeinGC_base uint32
|
||||
ProcessID uint64
|
||||
PromotedFinalizationMemoryfromGen0 uint64
|
||||
PromotedMemoryfromGen0 uint64
|
||||
PromotedMemoryfromGen1 uint64
|
||||
}
|
||||
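To make the PERF_RAW_FRACTION note above concrete, here is a small illustrative helper (hypothetical, not part of the collector) that applies the documented formula; collectClrMemory below performs the same arithmetic inline for PercentTimeinGC.

// perfRawFraction converts a PERF_RAW_FRACTION value/base pair into a percentage.
// Example: value=25, base=100 yields 25.0.
func perfRawFraction(value, base uint32) float64 {
	if base == 0 {
		return 0 // avoid division by zero when the base has not been sampled yet
	}

	return 100 * float64(value) / float64(base)
}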
|
||||
func (c *Collector) collectClrMemory(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRMemory
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRMemory", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.allocatedBytes,
|
||||
prometheus.CounterValue,
|
||||
float64(process.AllocatedBytesPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.finalizationSurvivors,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.FinalizationSurvivors),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
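// Heap size and promoted bytes are emitted per GC area, distinguished by the "area" label: Gen0, Gen1, Gen2 and LOH for heap size; Gen0 and Gen1 for promoted bytes.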
ch <- prometheus.MustNewConstMetric(
|
||||
c.heapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen0heapsize),
|
||||
process.Name,
|
||||
"Gen0",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.promotedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen0PromotedBytesPerSec),
|
||||
process.Name,
|
||||
"Gen0",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.heapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen1heapsize),
|
||||
process.Name,
|
||||
"Gen1",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.promotedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen1PromotedBytesPerSec),
|
||||
process.Name,
|
||||
"Gen1",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.heapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen2heapsize),
|
||||
process.Name,
|
||||
"Gen2",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.heapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.LargeObjectHeapsize),
|
||||
process.Name,
|
||||
"LOH",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberGCHandles,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberGCHandles),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberCollections,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberGen0Collections),
|
||||
process.Name,
|
||||
"Gen0",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberCollections,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberGen1Collections),
|
||||
process.Name,
|
||||
"Gen1",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberCollections,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberGen2Collections),
|
||||
process.Name,
|
||||
"Gen2",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberInducedGC,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberInducedGC),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfPinnedObjects,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofPinnedObjects),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberOfSinkBlocksInUse,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofSinkBlocksinuse),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberTotalCommittedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberTotalcommittedBytes),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberTotalReservedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberTotalreservedBytes),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeInGC,
|
||||
prometheus.GaugeValue,
|
||||
float64(100*process.PercentTimeinGC)/float64(process.PercentTimeinGC_base),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,116 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package netframework
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func (c *Collector) buildClrRemoting() {
|
||||
c.channels = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "channels_total"),
|
||||
"Displays the total number of remoting channels registered across all application domains since application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.contextBoundClassesLoaded = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "context_bound_classes_loaded"),
|
||||
"Displays the current number of context-bound classes that are loaded.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.contextBoundObjects = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "context_bound_objects_total"),
|
||||
"Displays the total number of context-bound objects allocated.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.contextProxies = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "context_proxies_total"),
|
||||
"Displays the total number of remoting proxy objects in this process since it started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.contexts = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "contexts"),
|
||||
"Displays the current number of remoting contexts in the application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalRemoteCalls = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "remote_calls_total"),
|
||||
"Displays the total number of remote procedure calls invoked since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRRemoting struct {
|
||||
Name string
|
||||
|
||||
Channels uint32
|
||||
ContextBoundClassesLoaded uint32
|
||||
ContextBoundObjectsAllocPersec uint32
|
||||
ContextProxies uint32
|
||||
Contexts uint32
|
||||
RemoteCallsPersec uint32
|
||||
TotalRemoteCalls uint32
|
||||
}
|
||||
|
||||
func (c *Collector) collectClrRemoting(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRRemoting", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.channels,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Channels),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.contextBoundClassesLoaded,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.ContextBoundClassesLoaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.contextBoundObjects,
|
||||
prometheus.CounterValue,
|
||||
float64(process.ContextBoundObjectsAllocPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.contextProxies,
|
||||
prometheus.CounterValue,
|
||||
float64(process.ContextProxies),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.contexts,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Contexts),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalRemoteCalls,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalRemoteCalls),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,89 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package netframework
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func (c *Collector) buildClrSecurity() {
|
||||
c.numberLinkTimeChecks = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "link_time_checks_total"),
|
||||
"Displays the total number of link-time code access security checks since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.timeInRTChecks = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "rt_checks_time_percent"),
|
||||
"Displays the percentage of time spent performing runtime code access security checks in the last sample.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.stackWalkDepth = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "stack_walk_depth"),
|
||||
"Displays the depth of the stack during that last runtime code access security check.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
c.totalRuntimeChecks = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "runtime_checks_total"),
|
||||
"Displays the total number of runtime code access security checks performed since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRSecurity struct {
|
||||
Name string
|
||||
|
||||
Frequency_PerfTime uint32
|
||||
NumberLinkTimeChecks uint32
|
||||
PercentTimeinRTchecks uint32
|
||||
PercentTimeSigAuthenticating uint64
|
||||
StackWalkDepth uint32
|
||||
TotalRuntimeChecks uint32
|
||||
}
|
||||
|
||||
func (c *Collector) collectClrSecurity(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_NETFramework_NETCLRSecurity", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.numberLinkTimeChecks,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberLinkTimeChecks),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.timeInRTChecks,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PercentTimeinRTchecks)/float64(process.Frequency_PerfTime),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.stackWalkDepth,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.StackWalkDepth),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalRuntimeChecks,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalRuntimeChecks),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
package netframework_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/netframework"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
// No context name required as Collector source is WMI
|
||||
testutils.FuncBenchmarkCollector(b, netframework.Name, netframework.NewWithFlags)
|
||||
}
|
||||
@@ -1,466 +0,0 @@
|
||||
package nps
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "nps"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// Collector is a Prometheus Collector for WMI Win32_PerfRawData_IAS_NPSAuthenticationServer and Win32_PerfRawData_IAS_NPSAccountingServer metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
wmiClient *wmi.Client
|
||||
|
||||
accessAccepts *prometheus.Desc
|
||||
accessChallenges *prometheus.Desc
|
||||
accessRejects *prometheus.Desc
|
||||
accessRequests *prometheus.Desc
|
||||
accessBadAuthenticators *prometheus.Desc
|
||||
accessDroppedPackets *prometheus.Desc
|
||||
accessInvalidRequests *prometheus.Desc
|
||||
accessMalformedPackets *prometheus.Desc
|
||||
accessPacketsReceived *prometheus.Desc
|
||||
accessPacketsSent *prometheus.Desc
|
||||
accessServerResetTime *prometheus.Desc
|
||||
accessServerUpTime *prometheus.Desc
|
||||
accessUnknownType *prometheus.Desc
|
||||
|
||||
accountingRequests *prometheus.Desc
|
||||
accountingResponses *prometheus.Desc
|
||||
accountingBadAuthenticators *prometheus.Desc
|
||||
accountingDroppedPackets *prometheus.Desc
|
||||
accountingInvalidRequests *prometheus.Desc
|
||||
accountingMalformedPackets *prometheus.Desc
|
||||
accountingNoRecord *prometheus.Desc
|
||||
accountingPacketsReceived *prometheus.Desc
|
||||
accountingPacketsSent *prometheus.Desc
|
||||
accountingServerResetTime *prometheus.Desc
|
||||
accountingServerUpTime *prometheus.Desc
|
||||
accountingUnknownType *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
|
||||
if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
|
||||
return errors.New("wmiClient or SWbemServicesClient is nil")
|
||||
}
|
||||
|
||||
c.wmiClient = wmiClient
|
||||
c.accessAccepts = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_accepts"),
|
||||
"(AccessAccepts)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessChallenges = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_challenges"),
|
||||
"(AccessChallenges)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessRejects = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_rejects"),
|
||||
"(AccessRejects)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_requests"),
|
||||
"(AccessRequests)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessBadAuthenticators = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_bad_authenticators"),
|
||||
"(BadAuthenticators)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessDroppedPackets = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_dropped_packets"),
|
||||
"(DroppedPackets)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessInvalidRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_invalid_requests"),
|
||||
"(InvalidRequests)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessMalformedPackets = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_malformed_packets"),
|
||||
"(MalformedPackets)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessPacketsReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_packets_received"),
|
||||
"(PacketsReceived)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessPacketsSent = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_packets_sent"),
|
||||
"(PacketsSent)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessServerResetTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_server_reset_time"),
|
||||
"(ServerResetTime)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessServerUpTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_server_up_time"),
|
||||
"(ServerUpTime)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accessUnknownType = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "access_unknown_type"),
|
||||
"(UnknownType)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
c.accountingRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_requests"),
|
||||
"(AccountingRequests)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingResponses = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_responses"),
|
||||
"(AccountingResponses)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingBadAuthenticators = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_bad_authenticators"),
|
||||
"(BadAuthenticators)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingDroppedPackets = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_dropped_packets"),
|
||||
"(DroppedPackets)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingInvalidRequests = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_invalid_requests"),
|
||||
"(InvalidRequests)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingMalformedPackets = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_malformed_packets"),
|
||||
"(MalformedPackets)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingNoRecord = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_no_record"),
|
||||
"(NoRecord)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingPacketsReceived = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_packets_received"),
|
||||
"(PacketsReceived)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingPacketsSent = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_packets_sent"),
|
||||
"(PacketsSent)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingServerResetTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_server_reset_time"),
|
||||
"(ServerResetTime)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingServerUpTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_server_up_time"),
|
||||
"(ServerUpTime)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.accountingUnknownType = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "accounting_unknown_type"),
|
||||
"(UnknownType)",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.CollectAccept(ch); err != nil {
|
||||
logger.Error(fmt.Sprintf("failed collecting NPS accept data: %s", err))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.CollectAccounting(ch); err != nil {
|
||||
logger.Error(fmt.Sprintf("failed collecting NPS accounting data: %s", err))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
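An alternative error-handling sketch (an assumption for illustration, not the exporter's method): collect both WMI classes and join the failures, mirroring the errors.Join pattern used by the netframework collector earlier in this diff. Collect above instead stops at the first failing query.

// collectAll is hypothetical; it relies only on the errors and fmt imports
// already present in this file.
func (c *Collector) collectAll(ch chan<- prometheus.Metric) error {
	var errs []error

	if err := c.CollectAccept(ch); err != nil {
		errs = append(errs, fmt.Errorf("failed collecting NPS accept data: %w", err))
	}

	if err := c.CollectAccounting(ch); err != nil {
		errs = append(errs, fmt.Errorf("failed collecting NPS accounting data: %w", err))
	}

	return errors.Join(errs...)
}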
|
||||
// Win32_PerfRawData_IAS_NPSAuthenticationServer docs:
|
||||
// at the moment there is no Microsoft documentation.
|
||||
type Win32_PerfRawData_IAS_NPSAuthenticationServer struct {
|
||||
Name string
|
||||
|
||||
AccessAccepts uint32
|
||||
AccessChallenges uint32
|
||||
AccessRejects uint32
|
||||
AccessRequests uint32
|
||||
AccessBadAuthenticators uint32
|
||||
AccessDroppedPackets uint32
|
||||
AccessInvalidRequests uint32
|
||||
AccessMalformedPackets uint32
|
||||
AccessPacketsReceived uint32
|
||||
AccessPacketsSent uint32
|
||||
AccessServerResetTime uint32
|
||||
AccessServerUpTime uint32
|
||||
AccessUnknownType uint32
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_IAS_NPSAccountingServer struct {
|
||||
Name string
|
||||
|
||||
AccountingRequests uint32
|
||||
AccountingResponses uint32
|
||||
AccountingBadAuthenticators uint32
|
||||
AccountingDroppedPackets uint32
|
||||
AccountingInvalidRequests uint32
|
||||
AccountingMalformedPackets uint32
|
||||
AccountingNoRecord uint32
|
||||
AccountingPacketsReceived uint32
|
||||
AccountingPacketsSent uint32
|
||||
AccountingServerResetTime uint32
|
||||
AccountingServerUpTime uint32
|
||||
AccountingUnknownType uint32
|
||||
}
|
||||
|
||||
// CollectAccept sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) CollectAccept(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_IAS_NPSAuthenticationServer
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_IAS_NPSAuthenticationServer", &dst); err != nil {
|
||||
return err
|
||||
}

// Guard against an empty result set so the dst[0] reads below cannot panic.
if len(dst) == 0 {
	return errors.New("WMI query returned an empty result set")
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessAccepts,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessAccepts),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessChallenges,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessChallenges),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessRejects,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessRejects),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessRequests,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessRequests),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessBadAuthenticators,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessBadAuthenticators),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessDroppedPackets,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessDroppedPackets),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessInvalidRequests,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessInvalidRequests),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessMalformedPackets,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessMalformedPackets),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessPacketsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessPacketsReceived),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessPacketsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessPacketsSent),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessServerResetTime,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessServerResetTime),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessServerUpTime,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessServerUpTime),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accessUnknownType,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccessUnknownType),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) CollectAccounting(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_PerfRawData_IAS_NPSAccountingServer
|
||||
if err := c.wmiClient.Query("SELECT * FROM Win32_PerfRawData_IAS_NPSAccountingServer", &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingRequests,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingRequests),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingResponses,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingResponses),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingBadAuthenticators,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingBadAuthenticators),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingDroppedPackets,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingDroppedPackets),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingInvalidRequests,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingInvalidRequests),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingMalformedPackets,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingMalformedPackets),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingNoRecord,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingNoRecord),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingPacketsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingPacketsReceived),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingPacketsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingPacketsSent),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingServerResetTime,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingServerResetTime),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingServerUpTime,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingServerUpTime),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.accountingUnknownType,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AccountingUnknownType),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package nps_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/nps"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, nps.Name, nps.NewWithFlags)
|
||||
}
|
||||
@@ -1,502 +0,0 @@
//go:build windows

package os

import (
	"errors"
	"fmt"
	"log/slog"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/headers/kernel32"
	"github.com/prometheus-community/windows_exporter/pkg/headers/netapi32"
	"github.com/prometheus-community/windows_exporter/pkg/headers/psapi"
	"github.com/prometheus-community/windows_exporter/pkg/headers/sysinfoapi"
	"github.com/prometheus-community/windows_exporter/pkg/perflib"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
	"golang.org/x/sys/windows"
	"golang.org/x/sys/windows/registry"
)

const Name = "os"

type Config struct{}

var ConfigDefaults = Config{}

// A Collector is a Prometheus Collector for WMI metrics.
type Collector struct {
	config Config

	hostname *prometheus.Desc
	osInformation *prometheus.Desc
	pagingFreeBytes *prometheus.Desc
	pagingLimitBytes *prometheus.Desc

	// users
	// Deprecated: Use windows_system_processes instead.
	processes *prometheus.Desc
	// users
	// Deprecated: Use windows_system_process_limit instead.
	processesLimit *prometheus.Desc

	// users
	// Deprecated: Use count(windows_logon_logon_type) instead.
	users *prometheus.Desc

	// physicalMemoryFreeBytes
	// Deprecated: Use windows_memory_physical_free_bytes instead.
	physicalMemoryFreeBytes *prometheus.Desc

	// processMemoryLimitBytes
	// Deprecated: Use windows_memory_process_memory_limit_bytes instead.
	processMemoryLimitBytes *prometheus.Desc

	// time
	// Deprecated: Use windows_time_current_timestamp_seconds instead.
	time *prometheus.Desc
	// timezone
	// Deprecated: Use windows_time_timezone instead.
	timezone *prometheus.Desc
	// virtualMemoryBytes
	// Deprecated: Use windows_memory_commit_limit instead.
	virtualMemoryBytes *prometheus.Desc
	// virtualMemoryFreeBytes
	// Deprecated: Use windows_memory_commit_limit instead.
	virtualMemoryFreeBytes *prometheus.Desc
	// visibleMemoryBytes
	// Deprecated: Use windows_memory_physical_total_bytes instead.
	visibleMemoryBytes *prometheus.Desc
}

type pagingFileCounter struct {
	Name string
	Usage float64 `perflib:"% Usage"`
	UsagePeak float64 `perflib:"% Usage Peak"`
}

func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	c := &Collector{
		config: *config,
	}

	return c
}

func NewWithFlags(_ *kingpin.Application) *Collector {
	return &Collector{}
}

func (c *Collector) GetName() string {
	return Name
}

func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	return []string{"Paging File"}, nil
}

func (c *Collector) Close(_ *slog.Logger) error {
	return nil
}

func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
	logger.Warn("The os collector holds a number of deprecated metrics and will be removed mid 2025. " +
		"See https://github.com/prometheus-community/windows_exporter/pull/1596 for more information.")

	workstationInfo, err := netapi32.GetWorkstationInfo()
	if err != nil {
		return fmt.Errorf("failed to get workstation info: %w", err)
	}

	productName, buildNumber, revision, err := c.getWindowsVersion()
	if err != nil {
		return fmt.Errorf("failed to get Windows version: %w", err)
	}

	c.osInformation = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "info"),
		`Contains full product name & version in labels. Note that the "major_version" for Windows 11 is \"10\"; a build number greater than 22000 represents Windows 11.`,
		nil,
		prometheus.Labels{
			"product": productName,
			"version": fmt.Sprintf("%d.%d.%s", workstationInfo.VersionMajor, workstationInfo.VersionMinor, buildNumber),
			"major_version": strconv.FormatUint(uint64(workstationInfo.VersionMajor), 10),
			"minor_version": strconv.FormatUint(uint64(workstationInfo.VersionMinor), 10),
			"build_number": buildNumber,
			"revision": revision,
		},
	)

	c.hostname = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "hostname"),
		"Labelled system hostname information as provided by ComputerSystem.DNSHostName and ComputerSystem.Domain",
		[]string{
			"hostname",
			"domain",
			"fqdn",
		},
		nil,
	)
	c.pagingLimitBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "paging_limit_bytes"),
		"OperatingSystem.SizeStoredInPagingFiles",
		nil,
		nil,
	)
	c.pagingFreeBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "paging_free_bytes"),
		"OperatingSystem.FreeSpaceInPagingFiles",
		nil,
		nil,
	)
	c.physicalMemoryFreeBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "physical_memory_free_bytes"),
		"Deprecated: Use `windows_memory_physical_free_bytes` instead.",
		nil,
		nil,
	)
	c.time = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "time"),
		"Deprecated: Use windows_time_current_timestamp_seconds instead.",
		nil,
		nil,
	)
	c.timezone = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "timezone"),
		"Deprecated: Use windows_time_timezone instead.",
		[]string{"timezone"},
		nil,
	)
	c.processes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "processes"),
		"Deprecated: Use `windows_system_processes` instead.",
		nil,
		nil,
	)
	c.processesLimit = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "processes_limit"),
		"Deprecated: Use `windows_system_process_limit` instead.",
		nil,
		nil,
	)
	c.processMemoryLimitBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "process_memory_limit_bytes"),
		"Deprecated: Use `windows_memory_process_memory_limit_bytes` instead.",
		nil,
		nil,
	)
	c.users = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "users"),
		"Deprecated: Use `count(windows_logon_logon_type)` instead.",
		nil,
		nil,
	)
	c.virtualMemoryBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "virtual_memory_bytes"),
		"Deprecated: Use `windows_memory_commit_limit` instead.",
		nil,
		nil,
	)
	c.visibleMemoryBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "visible_memory_bytes"),
		"Deprecated: Use `windows_memory_physical_total_bytes` instead.",
		nil,
		nil,
	)
	c.virtualMemoryFreeBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "virtual_memory_free_bytes"),
		"Deprecated: Use `windows_memory_commit_limit - windows_memory_committed_bytes` instead.",
		nil,
		nil,
	)

	return nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	logger = logger.With(slog.String("collector", Name))

	errs := make([]error, 0, 5)

	c.collect(ch)

	if err := c.collectHostname(ch); err != nil {
		logger.Error("failed collecting os metrics",
			slog.Any("err", err),
		)

		errs = append(errs, err)
	}

	if err := c.collectLoggedInUserCount(ch); err != nil {
		logger.Error("failed collecting os user count metrics",
			slog.Any("err", err),
		)

		errs = append(errs, err)
	}

	if err := c.collectMemory(ch); err != nil {
		logger.Error("failed collecting os memory metrics",
			slog.Any("err", err),
		)

		errs = append(errs, err)
	}

	if err := c.collectTime(ch); err != nil {
		logger.Error("failed collecting os time metrics",
			slog.Any("err", err),
		)

		errs = append(errs, err)
	}

	if err := c.collectPaging(ctx, logger, ch); err != nil {
		logger.Error("failed collecting os paging metrics",
			slog.Any("err", err),
		)

		errs = append(errs, err)
	}

	return errors.Join(errs...)
}

func (c *Collector) collectLoggedInUserCount(ch chan<- prometheus.Metric) error {
	workstationInfo, err := netapi32.GetWorkstationInfo()
	if err != nil {
		return err
	}

	ch <- prometheus.MustNewConstMetric(
		c.users,
		prometheus.GaugeValue,
		float64(workstationInfo.LoggedOnUsers),
	)

	return nil
}

func (c *Collector) collectHostname(ch chan<- prometheus.Metric) error {
	hostname, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSHostname)
	if err != nil {
		return err
	}

	domain, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSDomain)
	if err != nil {
		return err
	}

	fqdn, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSFullyQualified)
	if err != nil {
		return err
	}

	ch <- prometheus.MustNewConstMetric(
		c.hostname,
		prometheus.GaugeValue,
		1.0,
		hostname,
		domain,
		fqdn,
	)

	return nil
}

func (c *Collector) collectTime(ch chan<- prometheus.Metric) error {
	timeZoneInfo, err := kernel32.GetDynamicTimeZoneInformation()
	if err != nil {
		return err
	}

	// timeZoneKeyName contains the english name of the timezone.
	timezoneName := windows.UTF16ToString(timeZoneInfo.TimeZoneKeyName[:])

	ch <- prometheus.MustNewConstMetric(
		c.time,
		prometheus.GaugeValue,
		float64(time.Now().Unix()),
	)

	ch <- prometheus.MustNewConstMetric(
		c.timezone,
		prometheus.GaugeValue,
		1.0,
		timezoneName,
	)

	return nil
}

func (c *Collector) collectMemory(ch chan<- prometheus.Metric) error {
	memoryStatusEx, err := sysinfoapi.GlobalMemoryStatusEx()
	if err != nil {
		return err
	}

	ch <- prometheus.MustNewConstMetric(
		c.physicalMemoryFreeBytes,
		prometheus.GaugeValue,
		float64(memoryStatusEx.AvailPhys),
	)

	ch <- prometheus.MustNewConstMetric(
		c.virtualMemoryFreeBytes,
		prometheus.GaugeValue,
		float64(memoryStatusEx.AvailPageFile),
	)

	ch <- prometheus.MustNewConstMetric(
		c.virtualMemoryBytes,
		prometheus.GaugeValue,
		float64(memoryStatusEx.TotalPageFile),
	)

	ch <- prometheus.MustNewConstMetric(
		c.visibleMemoryBytes,
		prometheus.GaugeValue,
		float64(memoryStatusEx.TotalPhys),
	)

	ch <- prometheus.MustNewConstMetric(
		c.processMemoryLimitBytes,
		prometheus.GaugeValue,
		float64(memoryStatusEx.TotalVirtual),
	)

	return nil
}

func (c *Collector) collectPaging(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	// Get total allocation of paging files across all disks.
	memManKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Control\Session Manager\Memory Management`, registry.QUERY_VALUE)
	if err != nil {
		return err
	}

	defer memManKey.Close()

	pagingFiles, _, pagingErr := memManKey.GetStringsValue("ExistingPageFiles")

	var fsipf float64

	for _, pagingFile := range pagingFiles {
		fileString := strings.ReplaceAll(pagingFile, `\??\`, "")
		file, err := os.Stat(fileString)
		// For unknown reasons, Windows doesn't always create a page file. Continue collection rather than aborting.
		if err != nil {
			logger.Debug(fmt.Sprintf("Failed to read page file (reason: %s): %s\n", err, fileString))
		} else {
			fsipf += float64(file.Size())
		}
	}

	gpi, err := psapi.GetPerformanceInfo()
	if err != nil {
		return err
	}

	pfc := make([]pagingFileCounter, 0)
	if err = perflib.UnmarshalObject(ctx.PerfObjects["Paging File"], &pfc, logger); err != nil {
		return err
	}

	// Get current page file usage.
	var pfbRaw float64

	for _, pageFile := range pfc {
		if strings.Contains(strings.ToLower(pageFile.Name), "_total") {
			continue
		}

		pfbRaw += pageFile.Usage
	}

	if pagingErr == nil {
		// Subtract from total page file allocation on disk.
		pfb := fsipf - (pfbRaw * float64(gpi.PageSize))

		ch <- prometheus.MustNewConstMetric(
			c.pagingFreeBytes,
			prometheus.GaugeValue,
			pfb,
		)

		ch <- prometheus.MustNewConstMetric(
			c.pagingLimitBytes,
			prometheus.GaugeValue,
			fsipf,
		)
	} else {
		logger.Debug("Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
	}

	ch <- prometheus.MustNewConstMetric(
		c.processes,
		prometheus.GaugeValue,
		float64(gpi.ProcessCount),
	)

	return nil
}
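
// Illustrative example (not part of the original file): the paging arithmetic above,
// assuming a single 8 GiB page file, a 4 KiB page size reported by GetPerformanceInfo,
// and a summed "% Usage" raw value of 524288 pages:
//
//	fsipf  = 8 * 1024 * 1024 * 1024  // total page file allocation on disk
//	pfbRaw = 524288                  // pages in use, summed over all page files
//	pfb    = fsipf - pfbRaw*4096     // = 6442450944 bytes still free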

func (c *Collector) collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		c.osInformation,
		prometheus.GaugeValue,
		1.0,
	)

	// Windows has no defined limit, and is based off available resources. This currently isn't calculated by WMI and is set to default value.
	// https://techcommunity.microsoft.com/t5/windows-blog-archive/pushing-the-limits-of-windows-processes-and-threads/ba-p/723824
	// https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-operatingsystem
	ch <- prometheus.MustNewConstMetric(
		c.processesLimit,
		prometheus.GaugeValue,
		float64(4294967295),
	)
}

func (c *Collector) getWindowsVersion() (string, string, string, error) {
	// Get build number and product name from registry
	ntKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		return "", "", "", fmt.Errorf("failed to open registry key: %w", err)
	}

	defer ntKey.Close()

	productName, _, err := ntKey.GetStringValue("ProductName")
	if err != nil {
		return "", "", "", err
	}

	buildNumber, _, err := ntKey.GetStringValue("CurrentBuildNumber")
	if err != nil {
		return "", "", "", err
	}

	revision, _, err := ntKey.GetIntegerValue("UBR")
	if errors.Is(err, registry.ErrNotExist) {
		revision = 0
	} else if err != nil {
		return "", "", "", err
	}

	return productName, buildNumber, strconv.FormatUint(revision, 10), nil
}
@@ -1,12 +0,0 @@
package os_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/os"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, os.Name, os.NewWithFlags)
}
@@ -1,181 +0,0 @@
//go:build windows

package perfdata

import (
	"encoding/json"
	"fmt"
	"log/slog"
	"maps"
	"slices"
	"strings"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/perfdata"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
)

const (
	Name = "perfdata"
)

type Config struct {
	Objects []Object `yaml:"objects"`
}

var ConfigDefaults = Config{
	Objects: make([]Object, 0),
}

// A Collector is a Prometheus collector for perfdata metrics.
type Collector struct {
	config Config
}

func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	if config.Objects == nil {
		config.Objects = ConfigDefaults.Objects
	}

	c := &Collector{
		config: *config,
	}

	return c
}

func NewWithFlags(app *kingpin.Application) *Collector {
	c := &Collector{
		config: ConfigDefaults,
	}

	var objects string

	app.Flag(
		"collector.perfdata.objects",
		"Objects of performance data to observe. See docs for more information on how to use this flag. By default, no objects are observed.",
	).Default("").StringVar(&objects)

	app.Action(func(*kingpin.ParseContext) error {
		if objects == "" {
			return nil
		}

		if err := json.Unmarshal([]byte(objects), &c.config.Objects); err != nil {
			return fmt.Errorf("failed to parse objects: %w", err)
		}

		return nil
	})

	return c
}

func (c *Collector) GetName() string {
	return Name
}

func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	return []string{}, nil
}

func (c *Collector) Close(_ *slog.Logger) error {
	for _, object := range c.config.Objects {
		object.collector.Close()
	}

	return nil
}

func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
	logger.Warn("The perfdata collector is in an experimental state! The configuration may change in future. Please report any issues.")

	for i, object := range c.config.Objects {
		collector, err := perfdata.NewCollector(object.Object, object.Instances, slices.Sorted(maps.Keys(object.Counters)))
		if err != nil {
			return fmt.Errorf("failed to create pdh collector: %w", err)
		}

		if object.InstanceLabel == "" {
			c.config.Objects[i].InstanceLabel = "instance"
		}

		c.config.Objects[i].collector = collector
	}

	return nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	if err := c.collect(ch); err != nil {
		logger.Error("failed collecting performance data metrics",
			slog.Any("err", err),
		)

		return err
	}

	return nil
}

func (c *Collector) collect(ch chan<- prometheus.Metric) error {
	for _, object := range c.config.Objects {
		data, err := object.collector.Collect()
		if err != nil {
			return fmt.Errorf("failed to collect data: %w", err)
		}

		for instance, counters := range data {
			for counter, value := range counters {
				var labels prometheus.Labels
				if instance != perfdata.EmptyInstance {
					labels = prometheus.Labels{object.InstanceLabel: instance}
				}

				metricType := value.Type

				if val, ok := object.Counters[counter]; ok {
					switch val.Type {
					case "counter":
						metricType = prometheus.CounterValue
					case "gauge":
						metricType = prometheus.GaugeValue
					}
				}

				ch <- prometheus.MustNewConstMetric(
					prometheus.NewDesc(
						sanitizeMetricName(fmt.Sprintf("%s_perfdata_%s_%s", types.Namespace, object.Object, counter)),
						fmt.Sprintf("Performance data for \\%s\\%s", object.Object, counter),
						nil,
						labels,
					),
					metricType,
					value.FirstValue,
				)
			}
		}
	}

	return nil
}

func sanitizeMetricName(name string) string {
	replacer := strings.NewReplacer(
		".", "",
		"%", "",
		"/", "_",
		" ", "_",
		"-", "_",
	)

	return strings.Trim(replacer.Replace(strings.ToLower(name)), "_")
}
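
// Illustrative example (not part of the original file): for the "Memory" object and
// the "Available Bytes" counter, the metric name is derived as
//
//	sanitizeMetricName("windows_perfdata_Memory_Available Bytes")
//	// => "windows_perfdata_memory_available_bytes"
//
// which matches the series asserted in the collector's tests below.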
@@ -1,87 +0,0 @@
//go:build windows

package perfdata_test

import (
	"fmt"
	"io"
	"log/slog"
	"net/http"
	"net/http/httptest"
	"regexp"
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/perfdata"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type collectorAdapter struct {
	perfdata.Collector
}

// Describe implements the prometheus.Collector interface.
func (a collectorAdapter) Describe(_ chan<- *prometheus.Desc) {}

// Collect implements the prometheus.Collector interface.
func (a collectorAdapter) Collect(ch chan<- prometheus.Metric) {
	logger := slog.New(slog.NewTextHandler(io.Discard, nil))

	if err := a.Collector.Collect(nil, logger, ch); err != nil {
		panic(fmt.Sprintf("failed to update collector: %v", err))
	}
}

func TestCollector(t *testing.T) {
	t.Parallel()

	for _, tc := range []struct {
		object string
		instances []string
		counters map[string]perfdata.Counter
		expectedMetrics *regexp.Regexp
	}{
		{
			object: "Memory",
			instances: nil,
			counters: map[string]perfdata.Counter{"Available Bytes": {Type: "gauge"}},
			expectedMetrics: regexp.MustCompile(`^# HELP windows_perfdata_memory_available_bytes Performance data for \\\\Memory\\\\Available Bytes\s*# TYPE windows_perfdata_memory_available_bytes gauge\s*windows_perfdata_memory_available_bytes \d`),
		},
		{
			object: "Process",
			instances: []string{"*"},
			counters: map[string]perfdata.Counter{"Thread Count": {Type: "counter"}},
			expectedMetrics: regexp.MustCompile(`^# HELP windows_perfdata_process_thread_count Performance data for \\\\Process\\\\Thread Count\s*# TYPE windows_perfdata_process_thread_count counter\s*windows_perfdata_process_thread_count\{instance=".+"} \d`),
		},
	} {
		t.Run(tc.object, func(t *testing.T) {
			t.Parallel()

			perfDataCollector := perfdata.New(&perfdata.Config{
				Objects: []perfdata.Object{
					{
						Object: tc.object,
						Instances: tc.instances,
						Counters: tc.counters,
					},
				},
			})

			logger := slog.New(slog.NewTextHandler(io.Discard, nil))
			err := perfDataCollector.Build(logger, nil)
			require.NoError(t, err)

			registry := prometheus.NewRegistry()
			registry.MustRegister(collectorAdapter{*perfDataCollector})

			rw := httptest.NewRecorder()
			promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}).ServeHTTP(rw, &http.Request{})
			got := rw.Body.String()

			assert.NotEmpty(t, got)
			assert.Regexp(t, tc.expectedMetrics, got)
		})
	}
}
@@ -1,18 +0,0 @@
//go:build windows

package perfdata_test

import (
	"testing"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/collector/perfdata"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	perfDataObjects := `[{"object":"Processor Information","instances":["*"],"counters":{"*": {}}}]`
	kingpin.CommandLine.GetArg("collector.perfdata.objects").StringVar(&perfDataObjects)

	testutils.FuncBenchmarkCollector(b, perfdata.Name, perfdata.NewWithFlags)
}
@@ -1,18 +0,0 @@
package perfdata

import (
	"github.com/prometheus-community/windows_exporter/pkg/perfdata"
)

type Object struct {
	Object string `json:"object" yaml:"object"`
	Instances []string `json:"instances" yaml:"instances"`
	Counters map[string]Counter `json:"counters" yaml:"counters"`
	InstanceLabel string `json:"instance_label" yaml:"instance_label"` //nolint:tagliatelle

	collector *perfdata.Collector
}

type Counter struct {
	Type string `json:"type" yaml:"type"`
}
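
// Illustrative example (not part of the original file): a --collector.perfdata.objects
// value that unmarshals into the Object/Counter structs above; "instance_label" is
// optional and defaults to "instance" in Build():
//
//	[{"object":"Memory","counters":{"Available Bytes":{"type":"gauge"}}}]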
@@ -1,344 +0,0 @@
//go:build windows

package physical_disk

import (
	"fmt"
	"log/slog"
	"regexp"
	"strings"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/perflib"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
)

const Name = "physical_disk"

type Config struct {
	DiskInclude *regexp.Regexp `yaml:"disk_include"`
	DiskExclude *regexp.Regexp `yaml:"disk_exclude"`
}

var ConfigDefaults = Config{
	DiskInclude: types.RegExpAny,
	DiskExclude: types.RegExpEmpty,
}

// A Collector is a Prometheus Collector for perflib PhysicalDisk metrics.
type Collector struct {
	config Config

	idleTime *prometheus.Desc
	readBytesTotal *prometheus.Desc
	readLatency *prometheus.Desc
	readTime *prometheus.Desc
	readWriteLatency *prometheus.Desc
	readsTotal *prometheus.Desc
	requestsQueued *prometheus.Desc
	splitIOs *prometheus.Desc
	writeBytesTotal *prometheus.Desc
	writeLatency *prometheus.Desc
	writeTime *prometheus.Desc
	writesTotal *prometheus.Desc
}

func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	if config.DiskExclude == nil {
		config.DiskExclude = ConfigDefaults.DiskExclude
	}

	if config.DiskInclude == nil {
		config.DiskInclude = ConfigDefaults.DiskInclude
	}

	c := &Collector{
		config: *config,
	}

	return c
}

func NewWithFlags(app *kingpin.Application) *Collector {
	c := &Collector{
		config: ConfigDefaults,
	}

	var diskExclude, diskInclude string

	app.Flag(
		"collector.physical_disk.disk-exclude",
		"Regexp of disks to exclude. Disk number must both match include and not match exclude to be included.",
	).Default(c.config.DiskExclude.String()).StringVar(&diskExclude)

	app.Flag(
		"collector.physical_disk.disk-include",
		"Regexp of disks to include. Disk number must both match include and not match exclude to be included.",
	).Default(c.config.DiskInclude.String()).StringVar(&diskInclude)

	app.Action(func(*kingpin.ParseContext) error {
		var err error

		c.config.DiskExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", diskExclude))
		if err != nil {
			return fmt.Errorf("collector.physical_disk.disk-exclude: %w", err)
		}

		c.config.DiskInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", diskInclude))
		if err != nil {
			return fmt.Errorf("collector.physical_disk.disk-include: %w", err)
		}

		return nil
	})

	return c
}

func (c *Collector) GetName() string {
	return Name
}

func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	return []string{"PhysicalDisk"}, nil
}

func (c *Collector) Close(_ *slog.Logger) error {
	return nil
}

func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
	c.requestsQueued = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
		"The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength)",
		[]string{"disk"},
		nil,
	)

	c.readBytesTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "read_bytes_total"),
		"The number of bytes transferred from the disk during read operations (PhysicalDisk.DiskReadBytesPerSec)",
		[]string{"disk"},
		nil,
	)

	c.readsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "reads_total"),
		"The number of read operations on the disk (PhysicalDisk.DiskReadsPerSec)",
		[]string{"disk"},
		nil,
	)

	c.writeBytesTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "write_bytes_total"),
		"The number of bytes transferred to the disk during write operations (PhysicalDisk.DiskWriteBytesPerSec)",
		[]string{"disk"},
		nil,
	)

	c.writesTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "writes_total"),
		"The number of write operations on the disk (PhysicalDisk.DiskWritesPerSec)",
		[]string{"disk"},
		nil,
	)

	c.readTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "read_seconds_total"),
		"Seconds that the disk was busy servicing read requests (PhysicalDisk.PercentDiskReadTime)",
		[]string{"disk"},
		nil,
	)

	c.writeTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "write_seconds_total"),
		"Seconds that the disk was busy servicing write requests (PhysicalDisk.PercentDiskWriteTime)",
		[]string{"disk"},
		nil,
	)

	c.idleTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "idle_seconds_total"),
		"Seconds that the disk was idle (PhysicalDisk.PercentIdleTime)",
		[]string{"disk"},
		nil,
	)

	c.splitIOs = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "split_ios_total"),
		"The number of I/Os to the disk that were split into multiple I/Os (PhysicalDisk.SplitIOPerSec)",
		[]string{"disk"},
		nil,
	)

	c.readLatency = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "read_latency_seconds_total"),
		"Shows the average time, in seconds, of a read operation from the disk (PhysicalDisk.AvgDiskSecPerRead)",
		[]string{"disk"},
		nil,
	)

	c.writeLatency = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "write_latency_seconds_total"),
		"Shows the average time, in seconds, of a write operation to the disk (PhysicalDisk.AvgDiskSecPerWrite)",
		[]string{"disk"},
		nil,
	)

	c.readWriteLatency = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "read_write_latency_seconds_total"),
		"Shows the time, in seconds, of the average disk transfer (PhysicalDisk.AvgDiskSecPerTransfer)",
		[]string{"disk"},
		nil,
	)

	return nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	logger = logger.With(slog.String("collector", Name))
	if err := c.collect(ctx, logger, ch); err != nil {
		logger.Error("failed collecting physical_disk metrics",
			slog.Any("err", err),
		)

		return err
	}

	return nil
}

// PhysicalDisk
// Win32_PerfRawData_PerfDisk_PhysicalDisk docs:
// - https://docs.microsoft.com/en-us/previous-versions/aa394308(v=vs.85) - Win32_PerfRawData_PerfDisk_PhysicalDisk class.
type PhysicalDisk struct {
	Name string
	CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
	DiskReadBytesPerSec float64 `perflib:"Disk Read Bytes/sec"`
	DiskReadsPerSec float64 `perflib:"Disk Reads/sec"`
	DiskWriteBytesPerSec float64 `perflib:"Disk Write Bytes/sec"`
	DiskWritesPerSec float64 `perflib:"Disk Writes/sec"`
	PercentDiskReadTime float64 `perflib:"% Disk Read Time"`
	PercentDiskWriteTime float64 `perflib:"% Disk Write Time"`
	PercentIdleTime float64 `perflib:"% Idle Time"`
	SplitIOPerSec float64 `perflib:"Split IO/Sec"`
	AvgDiskSecPerRead float64 `perflib:"Avg. Disk sec/Read"`
	AvgDiskSecPerWrite float64 `perflib:"Avg. Disk sec/Write"`
	AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
}

func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	logger = logger.With(slog.String("collector", Name))

	var dst []PhysicalDisk

	if err := perflib.UnmarshalObject(ctx.PerfObjects["PhysicalDisk"], &dst, logger); err != nil {
		return err
	}

	for _, disk := range dst {
		if disk.Name == "_Total" ||
			c.config.DiskExclude.MatchString(disk.Name) ||
			!c.config.DiskInclude.MatchString(disk.Name) {
			continue
		}

		// Parse physical disk number from disk.Name. Mountpoint information is
		// sometimes included, e.g. "1 C:".
		disk_number, _, _ := strings.Cut(disk.Name, " ")

		ch <- prometheus.MustNewConstMetric(
			c.requestsQueued,
			prometheus.GaugeValue,
			disk.CurrentDiskQueueLength,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readBytesTotal,
			prometheus.CounterValue,
			disk.DiskReadBytesPerSec,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readsTotal,
			prometheus.CounterValue,
			disk.DiskReadsPerSec,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.writeBytesTotal,
			prometheus.CounterValue,
			disk.DiskWriteBytesPerSec,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.writesTotal,
			prometheus.CounterValue,
			disk.DiskWritesPerSec,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readTime,
			prometheus.CounterValue,
			disk.PercentDiskReadTime,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.writeTime,
			prometheus.CounterValue,
			disk.PercentDiskWriteTime,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.idleTime,
			prometheus.CounterValue,
			disk.PercentIdleTime,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.splitIOs,
			prometheus.CounterValue,
			disk.SplitIOPerSec,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readLatency,
			prometheus.CounterValue,
			disk.AvgDiskSecPerRead*perflib.TicksToSecondScaleFactor,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.writeLatency,
			prometheus.CounterValue,
			disk.AvgDiskSecPerWrite*perflib.TicksToSecondScaleFactor,
			disk_number,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readWriteLatency,
			prometheus.CounterValue,
			disk.AvgDiskSecPerTransfer*perflib.TicksToSecondScaleFactor,
			disk_number,
		)
	}

	return nil
}
@@ -1,12 +0,0 @@
package physical_disk_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/physical_disk"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, physical_disk.Name, physical_disk.NewWithFlags)
}
@@ -1,259 +0,0 @@
//go:build windows

package printer

import (
	"errors"
	"fmt"
	"log/slog"
	"regexp"
	"strings"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
)

const Name = "printer"

// printerStatusMap source: https://learn.microsoft.com/en-us/windows/win32/cimwin32prov/win32-printer#:~:text=Power%20Save-,PrinterStatus,Offline%20(7),-PrintJobDataType
var printerStatusMap = map[uint16]string{
	1: "Other",
	2: "Unknown",
	3: "Idle",
	4: "Printing",
	5: "Warmup",
	6: "Stopped Printing",
	7: "Offline",
}

type Config struct {
	PrinterInclude *regexp.Regexp `yaml:"printer_include"`
	PrinterExclude *regexp.Regexp `yaml:"printer_exclude"`
}

var ConfigDefaults = Config{
	PrinterInclude: types.RegExpAny,
	PrinterExclude: types.RegExpEmpty,
}

type Collector struct {
	config Config
	wmiClient *wmi.Client

	printerStatus *prometheus.Desc
	printerJobStatus *prometheus.Desc
	printerJobCount *prometheus.Desc
}

func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	if config.PrinterExclude == nil {
		config.PrinterExclude = ConfigDefaults.PrinterExclude
	}

	if config.PrinterInclude == nil {
		config.PrinterInclude = ConfigDefaults.PrinterInclude
	}

	c := &Collector{
		config: *config,
	}

	return c
}

func NewWithFlags(app *kingpin.Application) *Collector {
	c := &Collector{
		config: ConfigDefaults,
	}

	var printerInclude, printerExclude string

	app.Flag(
		"collector.printer.include",
		"Regular expression to match printers to collect metrics for",
	).Default(c.config.PrinterInclude.String()).StringVar(&printerInclude)

	app.Flag(
		"collector.printer.exclude",
		"Regular expression to match printers to exclude",
	).Default(c.config.PrinterExclude.String()).StringVar(&printerExclude)

	app.Action(func(*kingpin.ParseContext) error {
		var err error

		c.config.PrinterInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", printerInclude))
		if err != nil {
			return fmt.Errorf("collector.printer.include: %w", err)
		}

		c.config.PrinterExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", printerExclude))
		if err != nil {
			return fmt.Errorf("collector.printer.exclude: %w", err)
		}

		return nil
	})

	return c
}

func (c *Collector) Close(_ *slog.Logger) error {
	return nil
}

func (c *Collector) Build(_ *slog.Logger, wmiClient *wmi.Client) error {
	if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
		return errors.New("wmiClient or SWbemServicesClient is nil")
	}

	c.wmiClient = wmiClient

	c.printerJobStatus = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "job_status"),
		"A counter of printer jobs by status",
		[]string{"printer", "status"},
		nil,
	)
	c.printerStatus = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "status"),
		"Printer status",
		[]string{"printer", "status"},
		nil,
	)
	c.printerJobCount = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "job_count"),
		"Number of jobs processed by the printer since the last reset",
		[]string{"printer"},
		nil,
	)

	return nil
}

func (c *Collector) GetName() string { return Name }

func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	return []string{"Printer"}, nil
}

type wmiPrinter struct {
	Name string
	Default bool
	PrinterStatus uint16
	JobCountSinceLastReset uint32
}

type wmiPrintJob struct {
	Name string
	Status string
}

func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	logger = logger.With(slog.String("collector", Name))
	if err := c.collectPrinterStatus(ch); err != nil {
		logger.Error("failed to collect printer status metrics",
			slog.Any("err", err),
		)

		return err
	}

	if err := c.collectPrinterJobStatus(ch); err != nil {
		logger.Error("failed to collect printer job status metrics",
			slog.Any("err", err),
		)

		return err
	}

	return nil
}

func (c *Collector) collectPrinterStatus(ch chan<- prometheus.Metric) error {
	var printers []wmiPrinter
	if err := c.wmiClient.Query("SELECT * FROM win32_Printer", &printers); err != nil {
		return err
	}

	for _, printer := range printers {
		if c.config.PrinterExclude.MatchString(printer.Name) ||
			!c.config.PrinterInclude.MatchString(printer.Name) {
			continue
		}

		for printerStatus, printerStatusName := range printerStatusMap {
			isCurrentStatus := 0.0
			if printerStatus == printer.PrinterStatus {
				isCurrentStatus = 1.0
			}

			ch <- prometheus.MustNewConstMetric(
				c.printerStatus,
				prometheus.GaugeValue,
				isCurrentStatus,
				printer.Name,
				printerStatusName,
			)
		}

		ch <- prometheus.MustNewConstMetric(
			c.printerJobCount,
			prometheus.CounterValue,
			float64(printer.JobCountSinceLastReset),
			printer.Name,
		)
	}

	return nil
}

func (c *Collector) collectPrinterJobStatus(ch chan<- prometheus.Metric) error {
	var printJobs []wmiPrintJob
	if err := c.wmiClient.Query("SELECT * FROM win32_PrintJob", &printJobs); err != nil {
		return err
	}

	groupedPrintJobs := c.groupPrintJobs(printJobs)
	for group, count := range groupedPrintJobs {
		ch <- prometheus.MustNewConstMetric(
			c.printerJobStatus,
			prometheus.GaugeValue,
			float64(count),
			group.printerName,
			group.status,
		)
	}

	return nil
}

type PrintJobStatusGroup struct {
	printerName string
	status string
}

func (c *Collector) groupPrintJobs(printJobs []wmiPrintJob) map[PrintJobStatusGroup]int {
	groupedPrintJobs := make(map[PrintJobStatusGroup]int)

	for _, printJob := range printJobs {
		printerName := strings.Split(printJob.Name, ",")[0]

		if c.config.PrinterExclude.MatchString(printerName) ||
			!c.config.PrinterInclude.MatchString(printerName) {
			continue
		}

		groupedPrintJobs[PrintJobStatusGroup{
			printerName: printerName,
			status: printJob.Status,
		}]++
	}

	return groupedPrintJobs
}
@@ -1,16 +0,0 @@
package printer_test

import (
	"testing"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/collector/printer"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	// Whitelist is not set in testing context (kingpin flags not parsed), causing the collector to skip all printers.
	printersInclude := ".+"
	kingpin.CommandLine.GetArg("collector.printer.include").StringVar(&printersInclude)
	testutils.FuncBenchmarkCollector(b, "printer", printer.NewWithFlags)
}
@@ -1,645 +0,0 @@
//go:build windows

package process

import (
	"errors"
	"fmt"
	"log/slog"
	"regexp"
	"strconv"
	"strings"
	"unsafe"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/perflib"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
	"golang.org/x/sys/windows"
)

const Name = "process"

type Config struct {
	ProcessInclude *regexp.Regexp `yaml:"process_include"`
	ProcessExclude *regexp.Regexp `yaml:"process_exclude"`
	EnableWorkerProcess bool `yaml:"enable_iis_worker_process"` //nolint:tagliatelle
}

var ConfigDefaults = Config{
	ProcessInclude: types.RegExpAny,
	ProcessExclude: types.RegExpEmpty,
	EnableWorkerProcess: false,
}

type Collector struct {
	config Config
	wmiClient *wmi.Client

	lookupCache map[string]string

	info *prometheus.Desc
	cpuTimeTotal *prometheus.Desc
	handleCount *prometheus.Desc
	ioBytesTotal *prometheus.Desc
	ioOperationsTotal *prometheus.Desc
	pageFaultsTotal *prometheus.Desc
	pageFileBytes *prometheus.Desc
	poolBytes *prometheus.Desc
	priorityBase *prometheus.Desc
	privateBytes *prometheus.Desc
	startTime *prometheus.Desc
	threadCount *prometheus.Desc
	virtualBytes *prometheus.Desc
	workingSet *prometheus.Desc
	workingSetPeak *prometheus.Desc
	workingSetPrivate *prometheus.Desc
}

func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	if config.ProcessExclude == nil {
		config.ProcessExclude = ConfigDefaults.ProcessExclude
	}

	if config.ProcessInclude == nil {
		config.ProcessInclude = ConfigDefaults.ProcessInclude
	}

	c := &Collector{
		config: *config,
	}

	return c
}

func NewWithFlags(app *kingpin.Application) *Collector {
	c := &Collector{
		config: ConfigDefaults,
	}

	var processExclude, processInclude string

	app.Flag(
		"collector.process.exclude",
		"Regexp of processes to exclude. Process name must both match include and not match exclude to be included.",
	).Default(c.config.ProcessExclude.String()).StringVar(&processExclude)

	app.Flag(
		"collector.process.include",
		"Regexp of processes to include. Process name must both match include and not match exclude to be included.",
	).Default(c.config.ProcessInclude.String()).StringVar(&processInclude)

	app.Flag(
		"collector.process.iis",
		"Enable IIS worker process name queries. May cause the collector to leak memory.",
	).Default(strconv.FormatBool(c.config.EnableWorkerProcess)).BoolVar(&c.config.EnableWorkerProcess)

	app.Action(func(*kingpin.ParseContext) error {
		var err error

		c.config.ProcessExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", processExclude))
		if err != nil {
			return fmt.Errorf("collector.process.exclude: %w", err)
		}

		c.config.ProcessInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", processInclude))
		if err != nil {
			return fmt.Errorf("collector.process.include: %w", err)
		}

		return nil
	})

	return c
}

func (c *Collector) GetName() string {
	return Name
}

func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	return []string{"Process"}, nil
}

func (c *Collector) Close(_ *slog.Logger) error {
	return nil
}

func (c *Collector) Build(logger *slog.Logger, wmiClient *wmi.Client) error {
	logger = logger.With(slog.String("collector", Name))

	if wmiClient == nil || wmiClient.SWbemServicesClient == nil {
		return errors.New("wmiClient or SWbemServicesClient is nil")
	}

	c.wmiClient = wmiClient

	if c.config.ProcessInclude.String() == "^(?:.*)$" && c.config.ProcessExclude.String() == "^(?:)$" {
		logger.Warn("No filters specified for process collector. This will generate a very large number of metrics!")
	}

	c.info = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "info"),
		"Process information.",
		[]string{"process", "process_id", "creating_process_id", "process_group_id", "owner", "cmdline"},
		nil,
	)

	c.startTime = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "start_time"),
		"Time of process start.",
		[]string{"process", "process_id"},
		nil,
	)
	c.cpuTimeTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "cpu_time_total"),
		"Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user).",
		[]string{"process", "process_id", "mode"},
		nil,
	)
	c.handleCount = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "handles"),
		"Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.",
		[]string{"process", "process_id"},
		nil,
	)
	c.ioBytesTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "io_bytes_total"),
		"Bytes issued to I/O operations in different modes (read, write, other).",
		[]string{"process", "process_id", "mode"},
		nil,
	)
	c.ioOperationsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "io_operations_total"),
		"I/O operations issued in different modes (read, write, other).",
		[]string{"process", "process_id", "mode"},
		nil,
	)
	c.pageFaultsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "page_faults_total"),
		"Page faults by the threads executing in this process.",
		[]string{"process", "process_id"},
		nil,
	)
	c.pageFileBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "page_file_bytes"),
		"Current number of bytes this process has used in the paging file(s).",
		[]string{"process", "process_id"},
		nil,
	)
	c.poolBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "pool_bytes"),
		"Pool Bytes is the last observed number of bytes in the paged or nonpaged pool.",
		[]string{"process", "process_id", "pool"},
		nil,
	)
	c.priorityBase = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "priority_base"),
		"Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process.",
		[]string{"process", "process_id"},
		nil,
	)
	c.privateBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "private_bytes"),
		"Current number of bytes this process has allocated that cannot be shared with other processes.",
		[]string{"process", "process_id"},
		nil,
	)
	c.threadCount = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "threads"),
		"Number of threads currently active in this process.",
		[]string{"process", "process_id"},
		nil,
	)
	c.virtualBytes = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "virtual_bytes"),
		"Current size, in bytes, of the virtual address space that the process is using.",
		[]string{"process", "process_id"},
		nil,
	)
	c.workingSetPrivate = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "working_set_private_bytes"),
"Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes.",
|
||||
[]string{"process", "process_id"},
|
||||
nil,
|
||||
)
|
||||
c.workingSetPeak = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "working_set_peak_bytes"),
|
||||
"Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process.",
|
||||
[]string{"process", "process_id"},
|
||||
nil,
|
||||
)
|
||||
c.workingSet = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "working_set_bytes"),
|
||||
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process.",
|
||||
[]string{"process", "process_id"},
|
||||
nil,
|
||||
)
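// A hypothetical exposition line produced from the working-set descriptor above,
// assuming types.Namespace expands to "windows" (process name, PID and value are
// made up for illustration):
//   windows_process_working_set_bytes{process="chrome",process_id="4321"} 1.234567e+08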
|
||||
|
||||
c.lookupCache = make(map[string]string)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibProcess struct {
|
||||
Name string
|
||||
PercentProcessorTime float64 `perflib:"% Processor Time"`
|
||||
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
|
||||
PercentUserTime float64 `perflib:"% User Time"`
|
||||
CreatingProcessID float64 `perflib:"Creating Process ID"`
|
||||
ElapsedTime float64 `perflib:"Elapsed Time"`
|
||||
HandleCount float64 `perflib:"Handle Count"`
|
||||
IDProcess float64 `perflib:"ID Process"`
|
||||
IODataBytesPerSec float64 `perflib:"IO Data Bytes/sec"`
|
||||
IODataOperationsPerSec float64 `perflib:"IO Data Operations/sec"`
|
||||
IOOtherBytesPerSec float64 `perflib:"IO Other Bytes/sec"`
|
||||
IOOtherOperationsPerSec float64 `perflib:"IO Other Operations/sec"`
|
||||
IOReadBytesPerSec float64 `perflib:"IO Read Bytes/sec"`
|
||||
IOReadOperationsPerSec float64 `perflib:"IO Read Operations/sec"`
|
||||
IOWriteBytesPerSec float64 `perflib:"IO Write Bytes/sec"`
|
||||
IOWriteOperationsPerSec float64 `perflib:"IO Write Operations/sec"`
|
||||
PageFaultsPerSec float64 `perflib:"Page Faults/sec"`
|
||||
PageFileBytesPeak float64 `perflib:"Page File Bytes Peak"`
|
||||
PageFileBytes float64 `perflib:"Page File Bytes"`
|
||||
PoolNonPagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
|
||||
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
|
||||
PriorityBase float64 `perflib:"Priority Base"`
|
||||
PrivateBytes float64 `perflib:"Private Bytes"`
|
||||
ThreadCount float64 `perflib:"Thread Count"`
|
||||
VirtualBytesPeak float64 `perflib:"Virtual Bytes Peak"`
|
||||
VirtualBytes float64 `perflib:"Virtual Bytes"`
|
||||
WorkingSetPrivate float64 `perflib:"Working Set - Private"`
|
||||
WorkingSetPeak float64 `perflib:"Working Set Peak"`
|
||||
WorkingSet float64 `perflib:"Working Set"`
|
||||
}
|
||||
|
||||
type WorkerProcess struct {
|
||||
AppPoolName string
|
||||
ProcessId uint64
|
||||
}
|
||||
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
data := make([]perflibProcess, 0)
|
||||
|
||||
err := perflib.UnmarshalObject(ctx.PerfObjects["Process"], &data, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var workerProcesses []WorkerProcess
|
||||
if c.config.EnableWorkerProcess {
|
||||
if err := c.wmiClient.Query("SELECT * FROM WorkerProcess", &workerProcesses, nil, "root\\WebAdministration"); err != nil {
|
||||
logger.Debug("Could not query WebAdministration namespace for IIS worker processes",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
for _, process := range data {
|
||||
if process.Name == "_Total" ||
|
||||
c.config.ProcessExclude.MatchString(process.Name) ||
|
||||
!c.config.ProcessInclude.MatchString(process.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Duplicate processes are suffixed #, and an index number. Remove those.
|
||||
processName, _, _ := strings.Cut(process.Name, "#")
|
||||
pid := strconv.FormatUint(uint64(process.IDProcess), 10)
|
||||
parentPID := strconv.FormatUint(uint64(process.CreatingProcessID), 10)
|
||||
|
||||
if c.config.EnableWorkerProcess {
|
||||
for _, wp := range workerProcesses {
|
||||
if wp.ProcessId == uint64(process.IDProcess) {
|
||||
processName = strings.Join([]string{processName, wp.AppPoolName}, "_")
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cmdLine, processOwner, processGroupID, err := c.getProcessInformation(logger, uint32(process.IDProcess))
|
||||
if err != nil {
|
||||
logger.Debug("Failed to get process information",
|
||||
slog.String("pid", pid),
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.info,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
processName, pid, parentPID, strconv.Itoa(int(processGroupID)), processOwner, cmdLine,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.startTime,
|
||||
prometheus.GaugeValue,
|
||||
process.ElapsedTime,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.handleCount,
|
||||
prometheus.GaugeValue,
|
||||
process.HandleCount,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cpuTimeTotal,
|
||||
prometheus.CounterValue,
|
||||
process.PercentPrivilegedTime,
|
||||
processName, pid, "privileged",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.cpuTimeTotal,
|
||||
prometheus.CounterValue,
|
||||
process.PercentUserTime,
|
||||
processName, pid, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ioBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOOtherBytesPerSec,
|
||||
processName, pid, "other",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ioOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOOtherOperationsPerSec,
|
||||
processName, pid, "other",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ioBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOReadBytesPerSec,
|
||||
processName, pid, "read",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ioOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOReadOperationsPerSec,
|
||||
processName, pid, "read",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ioBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOWriteBytesPerSec,
|
||||
processName, pid, "write",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ioOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOWriteOperationsPerSec,
|
||||
processName, pid, "write",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.pageFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
process.PageFaultsPerSec,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.pageFileBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.PageFileBytes,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.poolBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.PoolNonPagedBytes,
|
||||
processName, pid, "nonpaged",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.poolBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.PoolPagedBytes,
|
||||
processName, pid, "paged",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.priorityBase,
|
||||
prometheus.GaugeValue,
|
||||
process.PriorityBase,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.privateBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.PrivateBytes,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.threadCount,
|
||||
prometheus.GaugeValue,
|
||||
process.ThreadCount,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.virtualBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.VirtualBytes,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.workingSetPrivate,
|
||||
prometheus.GaugeValue,
|
||||
process.WorkingSetPrivate,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.workingSetPeak,
|
||||
prometheus.GaugeValue,
|
||||
process.WorkingSetPeak,
|
||||
processName, pid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.workingSet,
|
||||
prometheus.GaugeValue,
|
||||
process.WorkingSet,
|
||||
processName, pid,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ref: https://github.com/microsoft/hcsshim/blob/8beabacfc2d21767a07c20f8dd5f9f3932dbf305/internal/uvm/stats.go#L25
|
||||
func (c *Collector) getProcessInformation(logger *slog.Logger, pid uint32) (string, string, uint32, error) {
|
||||
if pid == 0 {
|
||||
return "", "", 0, nil
|
||||
}
|
||||
|
||||
hProcess, vmReadAccess, err := c.openProcess(pid)
|
||||
if err != nil {
|
||||
if errors.Is(err, windows.ERROR_ACCESS_DENIED) {
|
||||
return "", "", 0, nil
|
||||
}
|
||||
|
||||
return "", "", 0, err
|
||||
}
|
||||
|
||||
defer func(hProcess windows.Handle) {
|
||||
if err := windows.CloseHandle(hProcess); err != nil {
|
||||
logger.Warn("CloseHandle failed",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
}(hProcess)
|
||||
|
||||
owner, err := c.getProcessOwner(logger, hProcess)
|
||||
if err != nil {
|
||||
return "", "", 0, err
|
||||
}
|
||||
|
||||
var (
|
||||
cmdLine string
|
||||
processGroupID uint32
|
||||
)
|
||||
|
||||
if vmReadAccess {
|
||||
cmdLine, processGroupID, err = c.getExtendedProcessInformation(hProcess)
|
||||
if err != nil {
|
||||
return "", owner, processGroupID, err
|
||||
}
|
||||
}
|
||||
|
||||
return cmdLine, owner, processGroupID, nil
|
||||
}
|
||||
|
||||
func (c *Collector) getExtendedProcessInformation(hProcess windows.Handle) (string, uint32, error) {
|
||||
// Get the process environment block (PEB) address
|
||||
var pbi windows.PROCESS_BASIC_INFORMATION
|
||||
|
||||
retLen := uint32(unsafe.Sizeof(pbi))
|
||||
if err := windows.NtQueryInformationProcess(hProcess, windows.ProcessBasicInformation, unsafe.Pointer(&pbi), retLen, &retLen); err != nil {
|
||||
return "", 0, fmt.Errorf("failed to query process basic information: %w", err)
|
||||
}
|
||||
|
||||
peb := windows.PEB{}
|
||||
|
||||
err := windows.ReadProcessMemory(hProcess,
|
||||
uintptr(unsafe.Pointer(pbi.PebBaseAddress)),
|
||||
(*byte)(unsafe.Pointer(&peb)),
|
||||
unsafe.Sizeof(peb),
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return "", 0, fmt.Errorf("failed to read process memory: %w", err)
|
||||
}
|
||||
|
||||
processParameters := windows.RTL_USER_PROCESS_PARAMETERS{}
|
||||
|
||||
err = windows.ReadProcessMemory(hProcess,
|
||||
uintptr(unsafe.Pointer(peb.ProcessParameters)),
|
||||
(*byte)(unsafe.Pointer(&processParameters)),
|
||||
unsafe.Sizeof(processParameters),
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return "", 0, fmt.Errorf("failed to read process memory: %w", err)
|
||||
}
|
||||
|
||||
cmdLineUTF16 := make([]uint16, processParameters.CommandLine.Length)
|
||||
|
||||
err = windows.ReadProcessMemory(hProcess,
|
||||
uintptr(unsafe.Pointer(processParameters.CommandLine.Buffer)),
|
||||
(*byte)(unsafe.Pointer(&cmdLineUTF16[0])),
|
||||
uintptr(processParameters.CommandLine.Length),
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return "", processParameters.ProcessGroupId, fmt.Errorf("failed to read process memory: %w", err)
|
||||
}
|
||||
|
||||
return strings.TrimSpace(windows.UTF16ToString(cmdLineUTF16)), processParameters.ProcessGroupId, nil
|
||||
}
|
||||
|
||||
func (c *Collector) getProcessOwner(logger *slog.Logger, hProcess windows.Handle) (string, error) {
|
||||
var tok windows.Token
|
||||
|
||||
if err := windows.OpenProcessToken(hProcess, windows.TOKEN_QUERY, &tok); err != nil {
|
||||
if errors.Is(err, windows.ERROR_ACCESS_DENIED) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to open process token: %w", err)
|
||||
}
|
||||
|
||||
defer func(tok windows.Token) {
|
||||
if err := tok.Close(); err != nil {
|
||||
logger.Warn("Token close failed",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
}(tok)
|
||||
|
||||
tokenUser, err := tok.GetTokenUser()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get token user: %w", err)
|
||||
}
|
||||
|
||||
sid := tokenUser.User.Sid.String()
|
||||
|
||||
owner, ok := c.lookupCache[sid]
|
||||
if !ok {
|
||||
account, domain, _, err := tokenUser.User.Sid.LookupAccount("")
|
||||
if err != nil {
|
||||
owner = sid
|
||||
} else {
|
||||
owner = fmt.Sprintf(`%s\%s`, domain, account)
|
||||
}
|
||||
|
||||
c.lookupCache[sid] = owner
|
||||
}
|
||||
|
||||
return owner, nil
|
||||
}
|
||||
|
||||
func (c *Collector) openProcess(pid uint32) (windows.Handle, bool, error) {
|
||||
// Open the process with QUERY_INFORMATION and VM_READ permissions
|
||||
hProcess, err := windows.OpenProcess(windows.PROCESS_QUERY_INFORMATION|windows.PROCESS_VM_READ, false, pid)
|
||||
if err == nil {
|
||||
return hProcess, true, nil
|
||||
}
|
||||
|
||||
if !errors.Is(err, windows.ERROR_ACCESS_DENIED) {
|
||||
return 0, false, fmt.Errorf("failed to open process: %w", err)
|
||||
}
|
||||
|
||||
if errors.Is(err, windows.Errno(0x57)) { // invalid parameter, for PIDs that don't exist
|
||||
return 0, false, errors.New("process not found")
|
||||
}
|
||||
|
||||
hProcess, err = windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
|
||||
if err != nil {
|
||||
return 0, false, fmt.Errorf("failed to open process with limited permissions: %w", err)
|
||||
}
|
||||
|
||||
return hProcess, false, nil
|
||||
}
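// Note on the fallback above: for protected processes (for example PID 4, the
// System process) the first OpenProcess call usually fails with
// ERROR_ACCESS_DENIED. The limited-information handle returned instead does not
// grant PROCESS_VM_READ, so vmReadAccess is false and the caller leaves the
// command line empty.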
|
||||
@@ -1,17 +0,0 @@
|
||||
package process_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/process"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkProcessCollector(b *testing.B) {
|
||||
// ProcessInclude is not set in testing context (kingpin flags not parsed), causing the collector to skip all processes.
|
||||
localProcessInclude := ".+"
|
||||
kingpin.CommandLine.GetArg("collector.process.include").StringVar(&localProcessInclude)
|
||||
// No context name required as collector source is WMI
|
||||
testutils.FuncBenchmarkCollector(b, process.Name, process.NewWithFlags)
|
||||
}
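// Assuming the standard Go toolchain, this benchmark can be run from the
// repository root with something like:
//   go test -bench=BenchmarkProcessCollector ./pkg/collector/process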
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
types2 "github.com/prometheus-community/windows_exporter/internal/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
@@ -53,31 +53,31 @@ func (c *MetricCollectors) NewPrometheusCollector(timeout time.Duration, logger
|
||||
metricCollectors: c,
|
||||
logger: logger,
|
||||
scrapeDurationDesc: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, "exporter", "scrape_duration_seconds"),
|
||||
prometheus.BuildFQName(types2.Namespace, "exporter", "scrape_duration_seconds"),
|
||||
"windows_exporter: Total scrape duration.",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
collectorScrapeDurationDesc: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, "exporter", "collector_duration_seconds"),
|
||||
prometheus.BuildFQName(types2.Namespace, "exporter", "collector_duration_seconds"),
|
||||
"windows_exporter: Duration of a collection.",
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
),
|
||||
collectorScrapeSuccessDesc: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, "exporter", "collector_success"),
|
||||
prometheus.BuildFQName(types2.Namespace, "exporter", "collector_success"),
|
||||
"windows_exporter: Whether the collector was successful.",
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
),
|
||||
collectorScrapeTimeoutDesc: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, "exporter", "collector_timeout"),
|
||||
prometheus.BuildFQName(types2.Namespace, "exporter", "collector_timeout"),
|
||||
"windows_exporter: Whether the collector timed out.",
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
),
|
||||
snapshotDuration: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, "exporter", "perflib_snapshot_duration_seconds"),
|
||||
prometheus.BuildFQName(types2.Namespace, "exporter", "perflib_snapshot_duration_seconds"),
|
||||
"Duration of perflib snapshot capture",
|
||||
nil,
|
||||
nil,
|
||||
@@ -166,7 +166,7 @@ func (p *Prometheus) Collect(ch chan<- prometheus.Metric) {
|
||||
)
|
||||
}
|
||||
|
||||
func (p *Prometheus) execute(name string, c Collector, scrapeCtx *types.ScrapeContext, ch chan<- prometheus.Metric) collectorStatusCode {
|
||||
func (p *Prometheus) execute(name string, c Collector, scrapeCtx *types2.ScrapeContext, ch chan<- prometheus.Metric) collectorStatusCode {
|
||||
var (
|
||||
err error
|
||||
numMetrics int
|
||||
|
||||
@@ -1,445 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package remote_fx
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/utils"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "remote_fx"
|
||||
|
||||
type Config struct{}
|
||||
|
||||
var ConfigDefaults = Config{}
|
||||
|
||||
// Collector
|
||||
// A Collector is a Prometheus Collector for
|
||||
// WMI Win32_PerfRawData_Counters_RemoteFXNetwork & Win32_PerfRawData_Counters_RemoteFXGraphics metrics
|
||||
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxnetwork/
|
||||
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxgraphics/
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
// net
|
||||
baseTCPRTT *prometheus.Desc
|
||||
baseUDPRTT *prometheus.Desc
|
||||
currentTCPBandwidth *prometheus.Desc
|
||||
currentTCPRTT *prometheus.Desc
|
||||
currentUDPBandwidth *prometheus.Desc
|
||||
currentUDPRTT *prometheus.Desc
|
||||
fecRate *prometheus.Desc
|
||||
lossRate *prometheus.Desc
|
||||
retransmissionRate *prometheus.Desc
|
||||
totalReceivedBytes *prometheus.Desc
|
||||
totalSentBytes *prometheus.Desc
|
||||
udpPacketsReceivedPerSec *prometheus.Desc
|
||||
udpPacketsSentPerSec *prometheus.Desc
|
||||
|
||||
// gfx
|
||||
averageEncodingTime *prometheus.Desc
|
||||
frameQuality *prometheus.Desc
|
||||
framesSkippedPerSecondInsufficientResources *prometheus.Desc
|
||||
graphicsCompressionRatio *prometheus.Desc
|
||||
inputFramesPerSecond *prometheus.Desc
|
||||
outputFramesPerSecond *prometheus.Desc
|
||||
sourceFramesPerSecond *prometheus.Desc
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(_ *kingpin.Application) *Collector {
|
||||
return &Collector{}
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{"RemoteFX Network", "RemoteFX Graphics"}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(*slog.Logger, *wmi.Client) error {
|
||||
// net
|
||||
c.baseTCPRTT = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"),
|
||||
"Base TCP round-trip time (RTT) detected in seconds",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.baseUDPRTT = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_base_udp_rtt_seconds"),
|
||||
"Base UDP round-trip time (RTT) detected in seconds.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.currentTCPBandwidth = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_current_tcp_bandwidth"),
|
||||
"TCP Bandwidth detected in bytes per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.currentTCPRTT = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_current_tcp_rtt_seconds"),
|
||||
"Average TCP round-trip time (RTT) detected in seconds.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.currentUDPBandwidth = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_current_udp_bandwidth"),
|
||||
"UDP Bandwidth detected in bytes per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.currentUDPRTT = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_current_udp_rtt_seconds"),
|
||||
"Average UDP round-trip time (RTT) detected in seconds.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.totalReceivedBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_received_bytes_total"),
|
||||
"(TotalReceivedBytes)",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.totalSentBytes = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_sent_bytes_total"),
|
||||
"(TotalSentBytes)",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.udpPacketsReceivedPerSec = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_udp_packets_received_total"),
|
||||
"Rate in packets per second at which packets are received over UDP.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.udpPacketsSentPerSec = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_udp_packets_sent_total"),
|
||||
"Rate in packets per second at which packets are sent over UDP.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.fecRate = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_fec_rate"),
|
||||
"Forward Error Correction (FEC) percentage",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.lossRate = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_loss_rate"),
|
||||
"Loss percentage",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.retransmissionRate = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "net_retransmission_rate"),
|
||||
"Percentage of packets that have been retransmitted",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
|
||||
// gfx
|
||||
c.averageEncodingTime = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "gfx_average_encoding_time_seconds"),
|
||||
"Average frame encoding time in seconds",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.frameQuality = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "gfx_frame_quality"),
|
||||
"Quality of the output frame expressed as a percentage of the quality of the source frame.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.framesSkippedPerSecondInsufficientResources = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "gfx_frames_skipped_insufficient_resource_total"),
|
||||
"Number of frames skipped per second due to insufficient client resources.",
|
||||
[]string{"session_name", "resource"},
|
||||
nil,
|
||||
)
|
||||
c.graphicsCompressionRatio = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "gfx_graphics_compression_ratio"),
|
||||
"Ratio of the number of bytes encoded to the number of bytes input.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.inputFramesPerSecond = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "gfx_input_frames_total"),
|
||||
"Number of sources frames provided as input to RemoteFX graphics per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.outputFramesPerSecond = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "gfx_output_frames_total"),
|
||||
"Number of frames sent to the client per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
c.sourceFramesPerSecond = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "gfx_source_frames_total"),
|
||||
"Number of frames composed by the source (DWM) per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collectRemoteFXNetworkCount(ctx, logger, ch); err != nil {
|
||||
logger.Error("failed collecting terminal services session count metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.collectRemoteFXGraphicsCounters(ctx, logger, ch); err != nil {
|
||||
logger.Error("failed collecting terminal services session count metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibRemoteFxNetwork struct {
|
||||
Name string
|
||||
BaseTCPRTT float64 `perflib:"Base TCP RTT"`
|
||||
BaseUDPRTT float64 `perflib:"Base UDP RTT"`
|
||||
CurrentTCPBandwidth float64 `perflib:"Current TCP Bandwidth"`
|
||||
CurrentTCPRTT float64 `perflib:"Current TCP RTT"`
|
||||
CurrentUDPBandwidth float64 `perflib:"Current UDP Bandwidth"`
|
||||
CurrentUDPRTT float64 `perflib:"Current UDP RTT"`
|
||||
TotalReceivedBytes float64 `perflib:"Total Received Bytes"`
|
||||
TotalSentBytes float64 `perflib:"Total Sent Bytes"`
|
||||
UDPPacketsReceivedPersec float64 `perflib:"UDP Packets Received/sec"`
|
||||
UDPPacketsSentPersec float64 `perflib:"UDP Packets Sent/sec"`
|
||||
FECRate float64 `perflib:"Forward Error Correction (FEC) percentage"`
|
||||
LossRate float64 `perflib:"Loss percentage"`
|
||||
RetransmissionRate float64 `perflib:"Percentage of packets that have been retransmitted"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
dst := make([]perflibRemoteFxNetwork, 0)
|
||||
|
||||
err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Network"], &dst, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, d := range dst {
|
||||
// only collect metrics for remote named sessions
|
||||
n := strings.ToLower(normalizeSessionName(d.Name))
|
||||
if n == "" || n == "services" || n == "console" {
|
||||
continue
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.baseTCPRTT,
|
||||
prometheus.GaugeValue,
|
||||
utils.MilliSecToSec(d.BaseTCPRTT),
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.baseUDPRTT,
|
||||
prometheus.GaugeValue,
|
||||
utils.MilliSecToSec(d.BaseUDPRTT),
|
||||
normalizeSessionName(d.Name),
|
||||
)
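// The "Current TCP Bandwidth" counter appears to be reported in kilobits per
// second; the (value*1000)/8 expression below converts it to bytes per second
// (this reading of the units is inferred from the conversion itself).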
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentTCPBandwidth,
|
||||
prometheus.GaugeValue,
|
||||
(d.CurrentTCPBandwidth*1000)/8,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentTCPRTT,
|
||||
prometheus.GaugeValue,
|
||||
utils.MilliSecToSec(d.CurrentTCPRTT),
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentUDPBandwidth,
|
||||
prometheus.GaugeValue,
|
||||
(d.CurrentUDPBandwidth*1000)/8,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.currentUDPRTT,
|
||||
prometheus.GaugeValue,
|
||||
utils.MilliSecToSec(d.CurrentUDPRTT),
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalReceivedBytes,
|
||||
prometheus.CounterValue,
|
||||
d.TotalReceivedBytes,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.totalSentBytes,
|
||||
prometheus.CounterValue,
|
||||
d.TotalSentBytes,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.udpPacketsReceivedPerSec,
|
||||
prometheus.CounterValue,
|
||||
d.UDPPacketsReceivedPersec,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.udpPacketsSentPerSec,
|
||||
prometheus.CounterValue,
|
||||
d.UDPPacketsSentPersec,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.fecRate,
|
||||
prometheus.GaugeValue,
|
||||
d.FECRate,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.lossRate,
|
||||
prometheus.GaugeValue,
|
||||
d.LossRate,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.retransmissionRate,
|
||||
prometheus.GaugeValue,
|
||||
d.RetransmissionRate,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibRemoteFxGraphics struct {
|
||||
Name string
|
||||
AverageEncodingTime float64 `perflib:"Average Encoding Time"`
|
||||
FrameQuality float64 `perflib:"Frame Quality"`
|
||||
FramesSkippedPerSecondInsufficientClientResources float64 `perflib:"Frames Skipped/Second - Insufficient Client Resources"`
|
||||
FramesSkippedPerSecondInsufficientNetworkResources float64 `perflib:"Frames Skipped/Second - Insufficient Network Resources"`
|
||||
FramesSkippedPerSecondInsufficientServerResources float64 `perflib:"Frames Skipped/Second - Insufficient Server Resources"`
|
||||
GraphicsCompressionratio float64 `perflib:"Graphics Compression ratio"`
|
||||
InputFramesPerSecond float64 `perflib:"Input Frames/Second"`
|
||||
OutputFramesPerSecond float64 `perflib:"Output Frames/Second"`
|
||||
SourceFramesPerSecond float64 `perflib:"Source Frames/Second"`
|
||||
}
|
||||
|
||||
func (c *Collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
dst := make([]perflibRemoteFxGraphics, 0)
|
||||
|
||||
err := perflib.UnmarshalObject(ctx.PerfObjects["RemoteFX Graphics"], &dst, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, d := range dst {
|
||||
// only collect metrics for remote named sessions
|
||||
n := strings.ToLower(normalizeSessionName(d.Name))
|
||||
if n == "" || n == "services" || n == "console" {
|
||||
continue
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.averageEncodingTime,
|
||||
prometheus.GaugeValue,
|
||||
utils.MilliSecToSec(d.AverageEncodingTime),
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.frameQuality,
|
||||
prometheus.GaugeValue,
|
||||
d.FrameQuality,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.framesSkippedPerSecondInsufficientResources,
|
||||
prometheus.CounterValue,
|
||||
d.FramesSkippedPerSecondInsufficientClientResources,
|
||||
normalizeSessionName(d.Name),
|
||||
"client",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.framesSkippedPerSecondInsufficientResources,
|
||||
prometheus.CounterValue,
|
||||
d.FramesSkippedPerSecondInsufficientNetworkResources,
|
||||
normalizeSessionName(d.Name),
|
||||
"network",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.framesSkippedPerSecondInsufficientResources,
|
||||
prometheus.CounterValue,
|
||||
d.FramesSkippedPerSecondInsufficientServerResources,
|
||||
normalizeSessionName(d.Name),
|
||||
"server",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.graphicsCompressionRatio,
|
||||
prometheus.GaugeValue,
|
||||
d.GraphicsCompressionratio,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.inputFramesPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.InputFramesPerSecond,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.outputFramesPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.OutputFramesPerSecond,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.sourceFramesPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.SourceFramesPerSecond,
|
||||
normalizeSessionName(d.Name),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// normalizeSessionName ensures that the session name is the same between the WTS API and performance counters.
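// For example, an instance name such as "RDP-tcp#55" would be normalized to
// "RDP-Tcp#55", matching the casing reported by the WTS API.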
|
||||
func normalizeSessionName(sessionName string) string {
|
||||
return strings.Replace(sessionName, "RDP-tcp", "RDP-Tcp", 1)
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package remote_fx_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/remote_fx"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, remote_fx.Name, remote_fx.NewWithFlags)
|
||||
}
|
||||
@@ -1,500 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package scheduled_task
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/go-ole/go-ole"
|
||||
"github.com/go-ole/go-ole/oleutil"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
)
|
||||
|
||||
const Name = "scheduled_task"
|
||||
|
||||
type Config struct {
|
||||
TaskExclude *regexp.Regexp `yaml:"task_exclude"`
|
||||
TaskInclude *regexp.Regexp `yaml:"task_include"`
|
||||
}
|
||||
|
||||
var ConfigDefaults = Config{
|
||||
TaskExclude: types.RegExpEmpty,
|
||||
TaskInclude: types.RegExpAny,
|
||||
}
|
||||
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
scheduledTasksReqCh chan struct{}
|
||||
scheduledTasksCh chan *scheduledTaskResults
|
||||
|
||||
lastResult *prometheus.Desc
|
||||
missedRuns *prometheus.Desc
|
||||
state *prometheus.Desc
|
||||
}
|
||||
|
||||
// TaskState ...
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_state
|
||||
type TaskState uint
|
||||
|
||||
type TaskResult uint
|
||||
|
||||
const (
|
||||
TASK_STATE_UNKNOWN TaskState = iota
|
||||
TASK_STATE_DISABLED
|
||||
TASK_STATE_QUEUED
|
||||
TASK_STATE_READY
|
||||
TASK_STATE_RUNNING
|
||||
)
|
||||
|
||||
const (
|
||||
SCHED_S_SUCCESS TaskResult = 0x0
|
||||
SCHED_S_TASK_HAS_NOT_RUN TaskResult = 0x00041303
|
||||
)
|
||||
|
||||
var taskStates = []string{"disabled", "queued", "ready", "running", "unknown"}
|
||||
|
||||
type scheduledTask struct {
|
||||
Name string
|
||||
Path string
|
||||
Enabled bool
|
||||
State TaskState
|
||||
MissedRunsCount float64
|
||||
LastTaskResult TaskResult
|
||||
}
|
||||
|
||||
type scheduledTaskResults struct {
|
||||
scheduledTasks []scheduledTask
|
||||
err error
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
if config.TaskExclude == nil {
|
||||
config.TaskExclude = ConfigDefaults.TaskExclude
|
||||
}
|
||||
|
||||
if config.TaskInclude == nil {
|
||||
config.TaskInclude = ConfigDefaults.TaskInclude
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(app *kingpin.Application) *Collector {
|
||||
c := &Collector{
|
||||
config: ConfigDefaults,
|
||||
}
|
||||
|
||||
var taskExclude, taskInclude string
|
||||
|
||||
app.Flag(
|
||||
"collector.scheduled_task.exclude",
|
||||
"Regexp of tasks to exclude. Task path must both match include and not match exclude to be included.",
|
||||
).Default(c.config.TaskExclude.String()).StringVar(&taskExclude)
|
||||
|
||||
app.Flag(
|
||||
"collector.scheduled_task.include",
|
||||
"Regexp of tasks to include. Task path must both match include and not match exclude to be included.",
|
||||
).Default(c.config.TaskInclude.String()).StringVar(&taskInclude)
|
||||
|
||||
app.Action(func(*kingpin.ParseContext) error {
|
||||
var err error
|
||||
|
||||
c.config.TaskExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", taskExclude))
|
||||
if err != nil {
|
||||
return fmt.Errorf("collector.scheduled_task.exclude: %w", err)
|
||||
}
|
||||
|
||||
c.config.TaskInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", taskInclude))
|
||||
if err != nil {
|
||||
return fmt.Errorf("collector.scheduled_task.include: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(_ *slog.Logger) error {
|
||||
close(c.scheduledTasksReqCh)
|
||||
|
||||
c.scheduledTasksReqCh = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
|
||||
initErrCh := make(chan error)
|
||||
c.scheduledTasksReqCh = make(chan struct{})
|
||||
c.scheduledTasksCh = make(chan *scheduledTaskResults)
|
||||
|
||||
go c.initializeScheduleService(initErrCh)
|
||||
|
||||
if err := <-initErrCh; err != nil {
|
||||
return fmt.Errorf("initialize schedule service: %w", err)
|
||||
}
|
||||
|
||||
c.lastResult = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "last_result"),
|
||||
"The result that was returned the last time the registered task was run",
|
||||
[]string{"task"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.missedRuns = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "missed_runs"),
|
||||
"The number of times the registered task missed a scheduled run",
|
||||
[]string{"task"},
|
||||
nil,
|
||||
)
|
||||
|
||||
c.state = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "state"),
|
||||
"The current state of a scheduled task",
|
||||
[]string{"task", "state"},
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
if err := c.collect(ch); err != nil {
|
||||
logger.Error("failed collecting user metrics",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
|
||||
scheduledTasks, err := c.getScheduledTasks()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get scheduled tasks: %w", err)
|
||||
}
|
||||
|
||||
for _, task := range scheduledTasks {
|
||||
if c.config.TaskExclude.MatchString(task.Path) ||
|
||||
!c.config.TaskInclude.MatchString(task.Path) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, state := range taskStates {
|
||||
var stateValue float64
|
||||
|
||||
if strings.ToLower(task.State.String()) == state {
|
||||
stateValue = 1.0
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.state,
|
||||
prometheus.GaugeValue,
|
||||
stateValue,
|
||||
task.Path,
|
||||
state,
|
||||
)
|
||||
}
|
||||
|
||||
if task.LastTaskResult == SCHED_S_TASK_HAS_NOT_RUN {
|
||||
continue
|
||||
}
|
||||
|
||||
lastResult := 0.0
|
||||
if task.LastTaskResult == SCHED_S_SUCCESS {
|
||||
lastResult = 1.0
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.lastResult,
|
||||
prometheus.GaugeValue,
|
||||
lastResult,
|
||||
task.Path,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.missedRuns,
|
||||
prometheus.GaugeValue,
|
||||
task.MissedRunsCount,
|
||||
task.Path,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) getScheduledTasks() ([]scheduledTask, error) {
|
||||
c.scheduledTasksReqCh <- struct{}{}
|
||||
|
||||
scheduledTasks, ok := <-c.scheduledTasksCh
|
||||
|
||||
if !ok {
|
||||
return []scheduledTask{}, nil
|
||||
}
|
||||
|
||||
if scheduledTasks == nil {
|
||||
return nil, errors.New("scheduled tasks channel is nil")
|
||||
}
|
||||
|
||||
if scheduledTasks.err != nil {
|
||||
return nil, scheduledTasks.err
|
||||
}
|
||||
|
||||
return scheduledTasks.scheduledTasks, scheduledTasks.err
|
||||
}
|
||||
|
||||
func (c *Collector) initializeScheduleService(initErrCh chan<- error) {
|
||||
// The only way to run WMI queries in parallel while being thread-safe is to
|
||||
// ensure the CoInitialize[Ex]() call is bound to its current OS thread.
|
||||
// Otherwise, attempting to initialize and run parallel queries across
|
||||
// goroutines will result in protected memory errors.
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
|
||||
var oleCode *ole.OleError
|
||||
if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != wmi.S_FALSE {
|
||||
initErrCh <- err
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
defer ole.CoUninitialize()
|
||||
|
||||
scheduleClassID, err := ole.ClassIDFrom("Schedule.Service.1")
|
||||
if err != nil {
|
||||
initErrCh <- err
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
taskSchedulerObj, err := ole.CreateInstance(scheduleClassID, nil)
|
||||
if err != nil || taskSchedulerObj == nil {
|
||||
initErrCh <- err
|
||||
|
||||
return
|
||||
}
|
||||
defer taskSchedulerObj.Release()
|
||||
|
||||
taskServiceObj := taskSchedulerObj.MustQueryInterface(ole.IID_IDispatch)
|
||||
defer taskServiceObj.Release()
|
||||
|
||||
taskService, err := oleutil.CallMethod(taskServiceObj, "Connect")
|
||||
if err != nil {
|
||||
initErrCh <- err
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
defer func(taskService *ole.VARIANT) {
|
||||
_ = taskService.Clear()
|
||||
}(taskService)
|
||||
|
||||
close(initErrCh)
|
||||
|
||||
scheduledTasks := make([]scheduledTask, 0, 100)
|
||||
|
||||
for range c.scheduledTasksReqCh {
|
||||
func() {
|
||||
// Clear the slice to avoid memory leaks
|
||||
clear(scheduledTasks)
|
||||
scheduledTasks = scheduledTasks[:0]
|
||||
|
||||
res, err := oleutil.CallMethod(taskServiceObj, "GetFolder", `\`)
|
||||
if err != nil {
|
||||
c.scheduledTasksCh <- &scheduledTaskResults{err: err}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
rootFolderObj := res.ToIDispatch()
|
||||
defer rootFolderObj.Release()
|
||||
|
||||
err = fetchTasksRecursively(rootFolderObj, &scheduledTasks)
|
||||
|
||||
c.scheduledTasksCh <- &scheduledTaskResults{scheduledTasks: scheduledTasks, err: err}
|
||||
}()
|
||||
}
|
||||
|
||||
close(c.scheduledTasksCh)
|
||||
|
||||
c.scheduledTasksCh = nil
|
||||
}
|
||||
|
||||
func fetchTasksRecursively(folder *ole.IDispatch, scheduledTasks *[]scheduledTask) error {
|
||||
if err := fetchTasksInFolder(folder, scheduledTasks); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := oleutil.CallMethod(folder, "GetFolders", 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
subFolders := res.ToIDispatch()
|
||||
defer subFolders.Release()
|
||||
|
||||
err = oleutil.ForEach(subFolders, func(v *ole.VARIANT) error {
|
||||
subFolder := v.ToIDispatch()
|
||||
defer subFolder.Release()
|
||||
|
||||
return fetchTasksRecursively(subFolder, scheduledTasks)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func fetchTasksInFolder(folder *ole.IDispatch, scheduledTasks *[]scheduledTask) error {
|
||||
res, err := oleutil.CallMethod(folder, "GetTasks", 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tasks := res.ToIDispatch()
|
||||
defer tasks.Release()
|
||||
|
||||
err = oleutil.ForEach(tasks, func(v *ole.VARIANT) error {
|
||||
task := v.ToIDispatch()
|
||||
defer task.Release()
|
||||
|
||||
parsedTask, err := parseTask(task)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*scheduledTasks = append(*scheduledTasks, parsedTask)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func parseTask(task *ole.IDispatch) (scheduledTask, error) {
|
||||
var scheduledTask scheduledTask
|
||||
|
||||
taskNameVar, err := oleutil.GetProperty(task, "Name")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if tempErr := taskNameVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskPathVar, err := oleutil.GetProperty(task, "Path")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if tempErr := taskPathVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskEnabledVar, err := oleutil.GetProperty(task, "Enabled")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if tempErr := taskEnabledVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskStateVar, err := oleutil.GetProperty(task, "State")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if tempErr := taskStateVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskNumberOfMissedRunsVar, err := oleutil.GetProperty(task, "NumberOfMissedRuns")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if tempErr := taskNumberOfMissedRunsVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskLastTaskResultVar, err := oleutil.GetProperty(task, "LastTaskResult")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if tempErr := taskLastTaskResultVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
scheduledTask.Name = taskNameVar.ToString()
|
||||
scheduledTask.Path = strings.ReplaceAll(taskPathVar.ToString(), "\\", "/")
|
||||
|
||||
if val, ok := taskEnabledVar.Value().(bool); ok {
|
||||
scheduledTask.Enabled = val
|
||||
}
|
||||
|
||||
scheduledTask.State = TaskState(taskStateVar.Val)
|
||||
scheduledTask.MissedRunsCount = float64(taskNumberOfMissedRunsVar.Val)
|
||||
scheduledTask.LastTaskResult = TaskResult(taskLastTaskResultVar.Val)
|
||||
|
||||
return scheduledTask, err
|
||||
}
|
||||
|
||||
func (t TaskState) String() string {
|
||||
switch t {
|
||||
case TASK_STATE_UNKNOWN:
|
||||
return "Unknown"
|
||||
case TASK_STATE_DISABLED:
|
||||
return "Disabled"
|
||||
case TASK_STATE_QUEUED:
|
||||
return "Queued"
|
||||
case TASK_STATE_READY:
|
||||
return "Ready"
|
||||
case TASK_STATE_RUNNING:
|
||||
return "Running"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package scheduled_task_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/pkg/collector/scheduled_task"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/testutils"
|
||||
)
|
||||
|
||||
func BenchmarkCollector(b *testing.B) {
|
||||
testutils.FuncBenchmarkCollector(b, scheduled_task.Name, scheduled_task.NewWithFlags)
|
||||
}
|
||||
@@ -1,416 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/prometheus-community/windows_exporter/pkg/types"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/yusufpapurcu/wmi"
|
||||
"golang.org/x/sys/windows"
|
||||
"golang.org/x/sys/windows/svc/mgr"
|
||||
)
|
||||
|
||||
const Name = "service"
|
||||
|
||||
type Config struct {
|
||||
ServiceInclude *regexp.Regexp `yaml:"service_include"`
|
||||
ServiceExclude *regexp.Regexp `yaml:"service_exclude"`
|
||||
}
|
||||
|
||||
var ConfigDefaults = Config{
|
||||
ServiceInclude: types.RegExpAny,
|
||||
ServiceExclude: types.RegExpEmpty,
|
||||
}
|
||||
|
||||
// A Collector is a Prometheus Collector for service metrics.
|
||||
type Collector struct {
|
||||
config Config
|
||||
|
||||
state *prometheus.Desc
|
||||
processID *prometheus.Desc
|
||||
info *prometheus.Desc
|
||||
startMode *prometheus.Desc
|
||||
|
||||
serviceManagerHandle *mgr.Mgr
|
||||
}
|
||||
|
||||
func New(config *Config) *Collector {
|
||||
if config == nil {
|
||||
config = &ConfigDefaults
|
||||
}
|
||||
|
||||
if config.ServiceExclude == nil {
|
||||
config.ServiceExclude = ConfigDefaults.ServiceExclude
|
||||
}
|
||||
|
||||
if config.ServiceInclude == nil {
|
||||
config.ServiceInclude = ConfigDefaults.ServiceInclude
|
||||
}
|
||||
|
||||
c := &Collector{
|
||||
config: *config,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWithFlags(app *kingpin.Application) *Collector {
|
||||
c := &Collector{
|
||||
config: ConfigDefaults,
|
||||
}
|
||||
|
||||
var serviceExclude, serviceInclude string
|
||||
|
||||
app.Flag(
|
||||
"collector.service.exclude",
|
||||
"Regexp of service to exclude. Service name (not the display name!) must both match include and not match exclude to be included.",
|
||||
).Default(c.config.ServiceExclude.String()).StringVar(&serviceExclude)
|
||||
|
||||
app.Flag(
|
||||
"collector.service.include",
|
||||
"Regexp of service to include. Process name (not the display name!) must both match include and not match exclude to be included.",
|
||||
).Default(c.config.ServiceInclude.String()).StringVar(&serviceInclude)
|
||||
|
||||
app.Action(func(*kingpin.ParseContext) error {
|
||||
var err error
|
||||
|
||||
c.config.ServiceExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", serviceExclude))
|
||||
if err != nil {
|
||||
return fmt.Errorf("collector.process.exclude: %w", err)
|
||||
}
|
||||
|
||||
c.config.ServiceInclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", serviceInclude))
|
||||
if err != nil {
|
||||
return fmt.Errorf("collector.process.include: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Collector) GetName() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (c *Collector) Build(logger *slog.Logger, _ *wmi.Client) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
if c.config.ServiceInclude.String() == "^(?:.*)$" && c.config.ServiceExclude.String() == "^(?:)$" {
|
||||
logger.Warn("No filters specified for service collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
c.info = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "info"),
|
||||
"A metric with a constant '1' value labeled with service information",
|
||||
[]string{"name", "display_name", "run_as", "path_name"},
|
||||
nil,
|
||||
)
|
||||
c.state = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "state"),
|
||||
"The state of the service (State)",
|
||||
[]string{"name", "state"},
|
||||
nil,
|
||||
)
|
||||
c.startMode = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "start_mode"),
|
||||
"The start mode of the service (StartMode)",
|
||||
[]string{"name", "start_mode"},
|
||||
nil,
|
||||
)
|
||||
c.processID = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(types.Namespace, Name, "process"),
|
||||
"Process of started service. The value is the creation time of the process as a unix timestamp.",
|
||||
[]string{"name", "process_id"},
|
||||
nil,
|
||||
)
|
||||
|
||||
// EnumServicesStatusEx requires only SC_MANAGER_ENUMERATE_SERVICE.
|
||||
handle, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_ENUMERATE_SERVICE)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open scm: %w", err)
|
||||
}
|
||||
|
||||
c.serviceManagerHandle = &mgr.Mgr{Handle: handle}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) Close(logger *slog.Logger) error {
|
||||
if err := c.serviceManagerHandle.Disconnect(); err != nil {
|
||||
logger.Warn("Failed to disconnect from scm",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
logger = logger.With(slog.String("collector", Name))
|
||||
|
||||
if err := c.collect(logger, ch); err != nil {
|
||||
logger.Error("failed collecting API service metrics:",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return fmt.Errorf("failed collecting API service metrics: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Collector) collect(logger *slog.Logger, ch chan<- prometheus.Metric) error {
|
||||
services, err := c.queryAllServices()
|
||||
if err != nil {
|
||||
logger.Warn("Failed to query services",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if services == nil {
|
||||
logger.Warn("No services queried")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Iterate through the Services List.
|
||||
for _, service := range services {
|
||||
serviceName := windows.UTF16PtrToString(service.ServiceName)
|
||||
if c.config.ServiceExclude.MatchString(serviceName) ||
|
||||
!c.config.ServiceInclude.MatchString(serviceName) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := c.collectService(ch, logger, service); err != nil {
|
||||
logger.Warn("failed collecting service info",
|
||||
slog.Any("err", err),
|
||||
slog.String("service", windows.UTF16PtrToString(service.ServiceName)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var apiStateValues = map[uint32]string{
|
||||
windows.SERVICE_CONTINUE_PENDING: "continue pending",
|
||||
windows.SERVICE_PAUSE_PENDING: "pause pending",
|
||||
windows.SERVICE_PAUSED: "paused",
|
||||
windows.SERVICE_RUNNING: "running",
|
||||
windows.SERVICE_START_PENDING: "start pending",
|
||||
windows.SERVICE_STOP_PENDING: "stop pending",
|
||||
windows.SERVICE_STOPPED: "stopped",
|
||||
}
|
||||
|
||||
var apiStartModeValues = map[uint32]string{
|
||||
windows.SERVICE_AUTO_START: "auto",
|
||||
windows.SERVICE_BOOT_START: "boot",
|
||||
windows.SERVICE_DEMAND_START: "manual",
|
||||
windows.SERVICE_DISABLED: "disabled",
|
||||
windows.SERVICE_SYSTEM_START: "system",
|
||||
}
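// Hypothetical exposition of the start_mode series emitted in collectService
// below, assuming types.Namespace expands to "windows": one series per entry in
// apiStartModeValues, with exactly one set to 1 (service name made up):
//   windows_service_start_mode{name="wuauserv",start_mode="auto"} 0
//   windows_service_start_mode{name="wuauserv",start_mode="manual"} 1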
|
||||
|
||||
func (c *Collector) collectService(ch chan<- prometheus.Metric, logger *slog.Logger, service windows.ENUM_SERVICE_STATUS_PROCESS) error {
|
||||
// Open connection for service handler.
|
||||
serviceHandle, err := windows.OpenService(c.serviceManagerHandle.Handle, service.ServiceName, windows.SERVICE_QUERY_CONFIG)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open service: %w", err)
|
||||
}
|
||||
|
||||
serviceNameString := windows.UTF16PtrToString(service.ServiceName)
|
||||
|
||||
// Create handle for each service.
|
||||
serviceManager := &mgr.Service{Name: serviceNameString, Handle: serviceHandle}
|
||||
defer func(serviceManager *mgr.Service) {
|
||||
if err := serviceManager.Close(); err != nil {
|
||||
logger.Warn("failed to close service handle",
|
||||
slog.Any("err", err),
|
||||
slog.String("service", serviceNameString),
|
||||
)
|
||||
}
|
||||
}(serviceManager)
|
||||
|
||||
// Get Service Configuration.
|
||||
serviceConfig, err := serviceManager.Config()
|
||||
if err != nil {
|
||||
if !errors.Is(err, windows.ERROR_FILE_NOT_FOUND) && !errors.Is(err, windows.ERROR_MUI_FILE_NOT_FOUND) {
|
||||
return fmt.Errorf("failed to get service configuration: %w", err)
|
||||
}
|
||||
|
||||
logger.Debug("failed collecting service",
|
||||
slog.Any("err", err),
|
||||
slog.String("service", serviceNameString),
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.info,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
serviceNameString,
|
||||
serviceConfig.DisplayName,
|
||||
serviceConfig.ServiceStartName,
|
||||
serviceConfig.BinaryPathName,
|
||||
)
|
||||
|
||||
var (
|
||||
isCurrentStartMode float64
|
||||
isCurrentState float64
|
||||
)
|
||||
|
||||
for _, startMode := range apiStartModeValues {
|
||||
isCurrentStartMode = 0.0
|
||||
if startMode == apiStartModeValues[serviceConfig.StartType] {
|
||||
isCurrentStartMode = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.startMode,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentStartMode,
|
||||
serviceNameString,
|
||||
startMode,
|
||||
)
|
||||
}
|
||||
|
||||
for state, stateValue := range apiStateValues {
|
||||
isCurrentState = 0.0
|
||||
if state == service.ServiceStatusProcess.CurrentState {
|
||||
isCurrentState = 1.0
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.state,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentState,
|
||||
serviceNameString,
|
||||
stateValue,
|
||||
)
|
||||
}
|
||||
|
||||
processID := strconv.FormatUint(uint64(service.ServiceStatusProcess.ProcessId), 10)
|
||||
|
||||
if processID != "0" { //nolint: nestif
|
||||
processStartTime, err := getProcessStartTime(logger, service.ServiceStatusProcess.ProcessId)
|
||||
if err != nil {
|
||||
if errors.Is(err, windows.ERROR_ACCESS_DENIED) {
|
||||
logger.Debug("failed to get process start time",
|
||||
slog.String("service", serviceNameString),
|
||||
slog.Any("err", err),
|
||||
)
|
||||
} else {
|
||||
logger.Warn("failed to get process start time",
|
||||
slog.String("service", serviceNameString),
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.processID,
|
||||
prometheus.GaugeValue,
|
||||
float64(processStartTime/1_000_000_000),
|
||||
serviceNameString,
|
||||
processID,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// queryAllServices returns all service states of the current Windows system
|
||||
// This is realized by ask Service Manager directly.
|
||||
func (c *Collector) queryAllServices() ([]windows.ENUM_SERVICE_STATUS_PROCESS, error) {
|
||||
var (
|
||||
bytesNeeded uint32
|
||||
servicesReturned uint32
|
||||
resumeHandle uint32
|
||||
)
|
||||
|
||||
if err := windows.EnumServicesStatusEx(
|
||||
c.serviceManagerHandle.Handle,
|
||||
windows.SC_STATUS_PROCESS_INFO,
|
||||
windows.SERVICE_WIN32,
|
||||
windows.SERVICE_STATE_ALL,
|
||||
nil,
|
||||
0,
|
||||
&bytesNeeded,
|
||||
&servicesReturned,
|
||||
&resumeHandle,
|
||||
nil,
|
||||
); !errors.Is(err, windows.ERROR_MORE_DATA) {
|
||||
return nil, fmt.Errorf("could not fetch buffer size for EnumServicesStatusEx: %w", err)
|
||||
}
|
||||
|
||||
buf := make([]byte, bytesNeeded)
|
||||
if err := windows.EnumServicesStatusEx(
|
||||
c.serviceManagerHandle.Handle,
|
||||
windows.SC_STATUS_PROCESS_INFO,
|
||||
windows.SERVICE_WIN32,
|
||||
windows.SERVICE_STATE_ALL,
|
||||
&buf[0],
|
||||
bytesNeeded,
|
||||
&bytesNeeded,
|
||||
&servicesReturned,
|
||||
&resumeHandle,
|
||||
nil,
|
||||
); err != nil {
|
||||
return nil, fmt.Errorf("could not query windows service list: %w", err)
|
||||
}
|
||||
|
||||
if servicesReturned == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
services := unsafe.Slice((*windows.ENUM_SERVICE_STATUS_PROCESS)(unsafe.Pointer(&buf[0])), int(servicesReturned))
|
||||
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func getProcessStartTime(logger *slog.Logger, pid uint32) (uint64, error) {
|
||||
handle, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to open process %w", err)
|
||||
}
|
||||
|
||||
defer func(handle windows.Handle) {
|
||||
err := windows.CloseHandle(handle)
|
||||
if err != nil {
|
||||
logger.Warn("failed to close process handle",
|
||||
slog.Any("err", err),
|
||||
)
|
||||
}
|
||||
}(handle)
|
||||
|
||||
var creation windows.Filetime
|
||||
|
||||
var exit windows.Filetime
|
||||
|
||||
var krn windows.Filetime
|
||||
|
||||
var user windows.Filetime
|
||||
|
||||
err = windows.GetProcessTimes(handle, &creation, &exit, &krn, &user)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get process times %w", err)
|
||||
}
|
||||
|
||||
return uint64(creation.Nanoseconds()), nil
|
||||
}
|
||||
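The two loops above encode the current start mode and state as one-hot gauge families: every possible value gets its own time series, and exactly one of them is set to 1. Below is a small, platform-independent sketch of that encoding; the state map, the chosen current state, and the printed metric name are illustrative only and not taken from the collector.

package main

import "fmt"

// exampleStates plays the role of apiStateValues: every possible state
// becomes its own time series, and exactly one of them carries the value 1.
var exampleStates = map[uint32]string{
	1: "stopped",
	4: "running",
	7: "paused",
}

func main() {
	currentState := uint32(4) // pretend the service is currently running

	for state, label := range exampleStates {
		value := 0.0
		if state == currentState {
			value = 1.0
		}

		// The collector emits this via prometheus.MustNewConstMetric; here we
		// only print the series that would be produced.
		fmt.Printf("windows_service_state{state=%q} %g\n", label, value)
	}
}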
@@ -1,12 +0,0 @@
package service_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/service"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, service.Name, service.NewWithFlags)
}
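The benchmark above delegates to testutils.FuncBenchmarkCollector, whose implementation lives in pkg/testutils and is not shown in this diff. As a rough sketch under that assumption, such a helper ultimately drives a standard testing.B loop over the collector's scrape path, along these lines (the package and function names below are illustrative):

package example

import "testing"

// collectOnce stands in for one full scrape of the collector built by the
// NewWithFlags constructor; the real helper wires up the actual collector.
func collectOnce() {
	// gather and discard metrics
}

func BenchmarkCollectorByHand(b *testing.B) {
	for i := 0; i < b.N; i++ {
		collectOnce()
	}
}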
@@ -1,135 +0,0 @@
//go:build windows

package smb

import (
	"log/slog"
	"strings"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/perflib"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
)

const Name = "smb"

type Config struct{}

var ConfigDefaults = Config{}

type Collector struct {
	config Config

	treeConnectCount     *prometheus.Desc
	currentOpenFileCount *prometheus.Desc
}

func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	c := &Collector{
		config: *config,
	}

	return c
}

func NewWithFlags(_ *kingpin.Application) *Collector {
	return &Collector{}
}

func (c *Collector) GetName() string {
	return Name
}

func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	return []string{
		"SMB Server Shares",
	}, nil
}

func (c *Collector) Close(_ *slog.Logger) error {
	return nil
}

func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
	// desc creates a new prometheus description.
	desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
		return prometheus.NewDesc(
			prometheus.BuildFQName(types.Namespace, "smb", metricName),
			description,
			labels,
			nil,
		)
	}

	c.currentOpenFileCount = desc("server_shares_current_open_file_count", "Current total count of open files on the SMB Server")
	c.treeConnectCount = desc("server_shares_tree_connect_count", "Count of user connections to the SMB Server")

	return nil
}

// Collect collects smb metrics and sends them to prometheus.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	logger = logger.With(slog.String("collector", Name))
	if err := c.collectServerShares(ctx, logger, ch); err != nil {
		logger.Error("failed to collect server share metrics",
			slog.Any("err", err),
		)

		return err
	}

	return nil
}

// Perflib: SMB Server Shares.
type perflibServerShares struct {
	Name string

	CurrentOpenFileCount float64 `perflib:"Current Open File Count"`
	TreeConnectCount     float64 `perflib:"Tree Connect Count"`
}

func (c *Collector) collectServerShares(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	logger = logger.With(slog.String("collector", Name))

	var data []perflibServerShares

	if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Server Shares"], &data, logger); err != nil {
		return err
	}

	for _, instance := range data {
		labelName := c.toLabelName(instance.Name)
		if !strings.HasSuffix(labelName, "_total") {
			continue
		}

		ch <- prometheus.MustNewConstMetric(
			c.currentOpenFileCount,
			prometheus.CounterValue,
			instance.CurrentOpenFileCount,
		)

		ch <- prometheus.MustNewConstMetric(
			c.treeConnectCount,
			prometheus.CounterValue,
			instance.TreeConnectCount,
		)
	}

	return nil
}

// toLabelName converts strings to lowercase and replaces all whitespaces and dots with underscores.
func (c *Collector) toLabelName(name string) string {
	s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")
	s = strings.ReplaceAll(s, "__", "_")

	return s
}
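For reference, a standalone sketch of the label conversion that collectServerShares relies on to keep only the aggregate "_Total" instance; the sample share name below is made up.

package main

import (
	"fmt"
	"strings"
)

// toLabelName mirrors the helper above: lowercase, join whitespace-separated
// fields with underscores, replace dots, and collapse doubled underscores.
func toLabelName(name string) string {
	s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")

	return strings.ReplaceAll(s, "__", "_")
}

func main() {
	// Only the synthetic "_Total" instance ends in "_total" after conversion,
	// which is exactly what the collector's HasSuffix filter keeps.
	for _, in := range []string{"_Total", "My Share.Name"} {
		fmt.Printf("%q -> %q\n", in, toLabelName(in))
	}
}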
@@ -1,12 +0,0 @@
package smb_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/smb"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, smb.Name, smb.NewWithFlags)
}
@@ -1,390 +0,0 @@
//go:build windows

package smbclient

import (
	"log/slog"
	"strings"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus-community/windows_exporter/pkg/perflib"
	"github.com/prometheus-community/windows_exporter/pkg/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/yusufpapurcu/wmi"
)

const (
	Name = "smbclient"
)

type Config struct{}

var ConfigDefaults = Config{}

type Collector struct {
	config Config

	readBytesTotal                            *prometheus.Desc
	readBytesTransmittedViaSMBDirectTotal     *prometheus.Desc
	readRequestQueueSecsTotal                 *prometheus.Desc
	readRequestsTransmittedViaSMBDirectTotal  *prometheus.Desc
	readSecsTotal                             *prometheus.Desc
	readsTotal                                *prometheus.Desc
	turboIOReadsTotal                         *prometheus.Desc
	TurboIOWritesTotal                        *prometheus.Desc
	writeBytesTotal                           *prometheus.Desc
	writeBytesTransmittedViaSMBDirectTotal    *prometheus.Desc
	writeRequestQueueSecsTotal                *prometheus.Desc
	writeRequestsTransmittedViaSMBDirectTotal *prometheus.Desc
	writeSecsTotal                            *prometheus.Desc
	writesTotal                               *prometheus.Desc

	creditStallsTotal     *prometheus.Desc
	currentDataQueued     *prometheus.Desc
	dataBytesTotal        *prometheus.Desc
	dataRequestsTotal     *prometheus.Desc
	metadataRequestsTotal *prometheus.Desc
	requestQueueSecsTotal *prometheus.Desc
	requestSecs           *prometheus.Desc
}

func New(config *Config) *Collector {
	if config == nil {
		config = &ConfigDefaults
	}

	c := &Collector{
		config: *config,
	}

	return c
}

func NewWithFlags(_ *kingpin.Application) *Collector {
	return &Collector{}
}

func (c *Collector) GetName() string {
	return Name
}

func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
	return []string{
		"SMB Client Shares",
	}, nil
}

func (c *Collector) Close(_ *slog.Logger) error {
	return nil
}

func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error {
	// desc creates a new prometheus description.
	desc := func(metricName string, description string, labels []string) *prometheus.Desc {
		return prometheus.NewDesc(
			prometheus.BuildFQName(types.Namespace, "smbclient", metricName),
			description,
			labels,
			nil,
		)
	}

	c.requestQueueSecsTotal = desc("data_queue_seconds_total",
		"Seconds requests waited on queue on this share",
		[]string{"server", "share"},
	)
	c.readRequestQueueSecsTotal = desc("read_queue_seconds_total",
		"Seconds read requests waited on queue on this share",
		[]string{"server", "share"},
	)
	c.writeRequestQueueSecsTotal = desc("write_queue_seconds_total",
		"Seconds write requests waited on queue on this share",
		[]string{"server", "share"},
	)
	c.requestSecs = desc("request_seconds_total",
		"Seconds waiting for requests on this share",
		[]string{"server", "share"},
	)
	c.creditStallsTotal = desc("stalls_total",
		"The number of requests delayed based on insufficient credits on this share",
		[]string{"server", "share"},
	)
	c.currentDataQueued = desc("requests_queued",
		"The point in time number of requests outstanding on this share",
		[]string{"server", "share"},
	)
	c.dataBytesTotal = desc("data_bytes_total",
		"The bytes read or written on this share",
		[]string{"server", "share"},
	)
	c.dataRequestsTotal = desc("requests_total",
		"The requests on this share",
		[]string{"server", "share"},
	)
	c.metadataRequestsTotal = desc("metadata_requests_total",
		"The metadata requests on this share",
		[]string{"server", "share"},
	)
	c.readBytesTransmittedViaSMBDirectTotal = desc("read_bytes_via_smbdirect_total",
		"The bytes read from this share via RDMA direct placement",
		[]string{"server", "share"},
	)
	c.readBytesTotal = desc("read_bytes_total",
		"The bytes read on this share",
		[]string{"server", "share"},
	)
	c.readRequestsTransmittedViaSMBDirectTotal = desc("read_requests_via_smbdirect_total",
		"The read requests on this share via RDMA direct placement",
		[]string{"server", "share"},
	)
	c.readsTotal = desc("read_requests_total",
		"The read requests on this share",
		[]string{"server", "share"},
	)
	c.turboIOReadsTotal = desc("turbo_io_reads_total",
		"The read requests that go through Turbo I/O",
		[]string{"server", "share"},
	)
	c.TurboIOWritesTotal = desc("turbo_io_writes_total",
		"The write requests that go through Turbo I/O",
		[]string{"server", "share"},
	)
	c.writeBytesTransmittedViaSMBDirectTotal = desc("write_bytes_via_smbdirect_total",
		"The written bytes to this share via RDMA direct placement",
		[]string{"server", "share"},
	)
	c.writeBytesTotal = desc("write_bytes_total",
		"The bytes written on this share",
		[]string{"server", "share"},
	)
	c.writeRequestsTransmittedViaSMBDirectTotal = desc("write_requests_via_smbdirect_total",
		"The write requests to this share via RDMA direct placement",
		[]string{"server", "share"},
	)
	c.writesTotal = desc("write_requests_total",
		"The write requests on this share",
		[]string{"server", "share"},
	)
	c.readSecsTotal = desc("read_seconds_total",
		"Seconds waiting for read requests on this share",
		[]string{"server", "share"},
	)
	c.writeSecsTotal = desc("write_seconds_total",
		"Seconds waiting for write requests on this share",
		[]string{"server", "share"},
	)

	return nil
}

// Collect collects smb client metrics and sends them to prometheus.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	logger = logger.With(slog.String("collector", Name))
	if err := c.collectClientShares(ctx, logger, ch); err != nil {
		logger.Error("failed to collect client share metrics",
			slog.Any("err", err),
		)

		return err
	}

	return nil
}

// Perflib: SMB Client Shares.
type perflibClientShares struct {
	Name string

	AvgDataQueueLength                         float64 `perflib:"Avg. Data Queue Length"`
	AvgReadQueueLength                         float64 `perflib:"Avg. Read Queue Length"`
	AvgSecPerRead                              float64 `perflib:"Avg. sec/Read"`
	AvgSecPerWrite                             float64 `perflib:"Avg. sec/Write"`
	AvgSecPerDataRequest                       float64 `perflib:"Avg. sec/Data Request"`
	AvgWriteQueueLength                        float64 `perflib:"Avg. Write Queue Length"`
	CreditStallsPerSec                         float64 `perflib:"Credit Stalls/sec"`
	CurrentDataQueueLength                     float64 `perflib:"Current Data Queue Length"`
	DataBytesPerSec                            float64 `perflib:"Data Bytes/sec"`
	DataRequestsPerSec                         float64 `perflib:"Data Requests/sec"`
	MetadataRequestsPerSec                     float64 `perflib:"Metadata Requests/sec"`
	ReadBytesTransmittedViaSMBDirectPerSec     float64 `perflib:"Read Bytes transmitted via SMB Direct/sec"`
	ReadBytesPerSec                            float64 `perflib:"Read Bytes/sec"`
	ReadRequestsTransmittedViaSMBDirectPerSec  float64 `perflib:"Read Requests transmitted via SMB Direct/sec"`
	ReadRequestsPerSec                         float64 `perflib:"Read Requests/sec"`
	TurboIOReadsPerSec                         float64 `perflib:"Turbo I/O Reads/sec"`
	TurboIOWritesPerSec                        float64 `perflib:"Turbo I/O Writes/sec"`
	WriteBytesTransmittedViaSMBDirectPerSec    float64 `perflib:"Write Bytes transmitted via SMB Direct/sec"`
	WriteBytesPerSec                           float64 `perflib:"Write Bytes/sec"`
	WriteRequestsTransmittedViaSMBDirectPerSec float64 `perflib:"Write Requests transmitted via SMB Direct/sec"`
	WriteRequestsPerSec                        float64 `perflib:"Write Requests/sec"`
}

func (c *Collector) collectClientShares(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
	logger = logger.With(slog.String("collector", Name))

	var data []perflibClientShares

	if err := perflib.UnmarshalObject(ctx.PerfObjects["SMB Client Shares"], &data, logger); err != nil {
		return err
	}

	for _, instance := range data {
		if instance.Name == "_Total" {
			continue
		}

		parsed := strings.FieldsFunc(instance.Name, func(r rune) bool { return r == '\\' })
		serverValue := parsed[0]
		shareValue := parsed[1]

		// Request time spent on queue. Convert from ticks to seconds.
		ch <- prometheus.MustNewConstMetric(
			c.requestQueueSecsTotal,
			prometheus.CounterValue,
			instance.AvgDataQueueLength*perflib.TicksToSecondScaleFactor,
			serverValue, shareValue,
		)

		// Read time spent on queue. Convert from ticks to seconds.
		ch <- prometheus.MustNewConstMetric(
			c.readRequestQueueSecsTotal,
			prometheus.CounterValue,
			instance.AvgReadQueueLength*perflib.TicksToSecondScaleFactor,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readSecsTotal,
			prometheus.CounterValue,
			instance.AvgSecPerRead*perflib.TicksToSecondScaleFactor,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.writeSecsTotal,
			prometheus.CounterValue,
			instance.AvgSecPerWrite*perflib.TicksToSecondScaleFactor,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.requestSecs,
			prometheus.CounterValue,
			instance.AvgSecPerDataRequest*perflib.TicksToSecondScaleFactor,
			serverValue, shareValue,
		)

		// Write time spent on queue. Convert from ticks to seconds.
		ch <- prometheus.MustNewConstMetric(
			c.writeRequestQueueSecsTotal,
			prometheus.CounterValue,
			instance.AvgWriteQueueLength*perflib.TicksToSecondScaleFactor,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.creditStallsTotal,
			prometheus.CounterValue,
			instance.CreditStallsPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.currentDataQueued,
			prometheus.GaugeValue,
			instance.CurrentDataQueueLength,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.dataBytesTotal,
			prometheus.CounterValue,
			instance.DataBytesPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.dataRequestsTotal,
			prometheus.CounterValue,
			instance.DataRequestsPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.metadataRequestsTotal,
			prometheus.CounterValue,
			instance.MetadataRequestsPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readBytesTransmittedViaSMBDirectTotal,
			prometheus.CounterValue,
			instance.ReadBytesTransmittedViaSMBDirectPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readBytesTotal,
			prometheus.CounterValue,
			instance.ReadBytesPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readRequestsTransmittedViaSMBDirectTotal,
			prometheus.CounterValue,
			instance.ReadRequestsTransmittedViaSMBDirectPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.readsTotal,
			prometheus.CounterValue,
			instance.ReadRequestsPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.turboIOReadsTotal,
			prometheus.CounterValue,
			instance.TurboIOReadsPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.TurboIOWritesTotal,
			prometheus.CounterValue,
			instance.TurboIOWritesPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.writeBytesTransmittedViaSMBDirectTotal,
			prometheus.CounterValue,
			instance.WriteBytesTransmittedViaSMBDirectPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.writeBytesTotal,
			prometheus.CounterValue,
			instance.WriteBytesPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.writeRequestsTransmittedViaSMBDirectTotal,
			prometheus.CounterValue,
			instance.WriteRequestsTransmittedViaSMBDirectPerSec,
			serverValue, shareValue,
		)

		ch <- prometheus.MustNewConstMetric(
			c.writesTotal,
			prometheus.CounterValue,
			instance.WriteRequestsPerSec,
			serverValue, shareValue,
		)
	}

	return nil
}
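The queue and latency counters above are accumulated by perflib in timer ticks and multiplied by perflib.TicksToSecondScaleFactor before being exposed as *_seconds_total metrics. Assuming 100-nanosecond ticks (i.e. a scale factor of 1/1e7), the conversion amounts to the sketch below; the constant and sample value here are illustrative and not taken from the perflib package.

package main

import "fmt"

// ticksToSecondScaleFactor assumes perflib timer counters advance in
// 100-nanosecond ticks; the real constant is defined in the perflib package.
const ticksToSecondScaleFactor = 1.0 / 1e7

func main() {
	rawTicks := 123456789.0 // e.g. an "Avg. Data Queue Length" accumulator

	fmt.Printf("%.4f seconds\n", rawTicks*ticksToSecondScaleFactor)
}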
@@ -1,12 +0,0 @@
package smbclient_test

import (
	"testing"

	"github.com/prometheus-community/windows_exporter/pkg/collector/smbclient"
	"github.com/prometheus-community/windows_exporter/pkg/testutils"
)

func BenchmarkCollector(b *testing.B) {
	testutils.FuncBenchmarkCollector(b, smbclient.Name, smbclient.NewWithFlags)
}