mirror of https://github.com/prometheus-community/windows_exporter.git
synced 2026-02-08 05:56:37 +00:00

smb: extend smb share metrics (#1765)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>

@@ -265,7 +265,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("DirectoryServices", perfdata.InstanceAll, counters)
+	c.perfDataCollector, err = perfdata.NewCollector("DirectoryServices", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
 	}

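Most of the hunks below are a single mechanical rename in the internal perfdata package: the instance-filter slices `InstanceAll` and `InstanceTotal` become `InstancesAll` and `InstancesTotal`, and the map-key constant `EmptyInstance` becomes `InstanceEmpty`, freeing the singular names for the string constants added in the final hunk. A condensed sketch of how the renamed identifiers pair up at a call site (fragment; names taken from hunks in this diff):

// Query filters (slices of instance names) now carry plural names:
//   perfdata.InstanceAll   -> perfdata.InstancesAll   ([]string{"*"})
//   perfdata.InstanceTotal -> perfdata.InstancesTotal ([]string{"_Total"})
// Result-map keys (single instance names) take the singular names:
//   perfdata.EmptyInstance -> perfdata.InstanceEmpty  ("------")
//   "_Total"               -> perfdata.InstanceTotal
c.perfDataCollector, err = perfdata.NewCollector("DNS", perfdata.InstancesAll, counters)
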
@@ -70,7 +70,7 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("Certification Authority", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("Certification Authority", perfdata.InstancesAll, []string{
 		requestsPerSecond,
 		requestProcessingTime,
 		retrievalsPerSecond,

internal/collector/cache/cache.go (vendored)

@@ -86,7 +86,7 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("Cache", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("Cache", perfdata.InstancesAll, []string{
 		asyncCopyReadsTotal,
 		asyncDataMapsTotal,
 		asyncFastReadsTotal,

@@ -306,7 +306,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect Cache metrics: %w", err)
 	}
 
-	cacheData, ok := data[perfdata.EmptyInstance]
+	cacheData, ok := data[perfdata.InstanceEmpty]
 
 	if !ok {
 		return errors.New("perflib query for Cache returned empty result set")

@@ -78,7 +78,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	c.mu = sync.Mutex{}
 
-	c.perfDataCollector, err = perfdata.NewCollector("Processor Information", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("Processor Information", perfdata.InstancesAll, []string{
 		c1TimeSeconds,
 		c2TimeSeconds,
 		c3TimeSeconds,

@@ -146,7 +146,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 	var err error
 
 	if slices.Contains(c.config.CollectorsEnabled, "connection") {
-		c.perfDataCollectorConnection, err = perfdata.NewCollector("DFS Replication Connections", perfdata.InstanceAll, []string{
+		c.perfDataCollectorConnection, err = perfdata.NewCollector("DFS Replication Connections", perfdata.InstancesAll, []string{
 			bandwidthSavingsUsingDFSReplicationTotal,
 			bytesReceivedTotal,
 			compressedSizeOfFilesReceivedTotal,

@@ -163,7 +163,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 	}
 
 	if slices.Contains(c.config.CollectorsEnabled, "folder") {
-		c.perfDataCollectorFolder, err = perfdata.NewCollector("DFS Replicated Folders", perfdata.InstanceAll, []string{
+		c.perfDataCollectorFolder, err = perfdata.NewCollector("DFS Replicated Folders", perfdata.InstancesAll, []string{
 			bandwidthSavingsUsingDFSReplicationTotal,
 			compressedSizeOfFilesReceivedTotal,
 			conflictBytesCleanedUpTotal,

@@ -198,7 +198,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 	}
 
 	if slices.Contains(c.config.CollectorsEnabled, "volume") {
-		c.perfDataCollectorVolume, err = perfdata.NewCollector("DFS Replication Service Volumes", perfdata.InstanceAll, []string{
+		c.perfDataCollectorVolume, err = perfdata.NewCollector("DFS Replication Service Volumes", perfdata.InstancesAll, []string{
 			databaseCommitsTotal,
 			databaseLookupsTotal,
 			usnJournalRecordsReadTotal,

@@ -273,7 +273,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect DHCP Server metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return errors.New("perflib query for DHCP Server returned empty result set")
 	}

@@ -79,7 +79,7 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("DNS", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("DNS", perfdata.InstancesAll, []string{
 		axfrRequestReceived,
 		axfrRequestSent,
 		axfrResponseReceived,

@@ -269,7 +269,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect DNS metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return errors.New("perflib query for DNS returned empty result set")
 	}

@@ -26,7 +26,7 @@ func (c *Collector) buildActiveSync() error {
 	var err error
 
-	c.perfDataCollectorActiveSync, err = perfdata.NewCollector("MSExchange ActiveSync", perfdata.InstanceAll, counters)
+	c.perfDataCollectorActiveSync, err = perfdata.NewCollector("MSExchange ActiveSync", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err)
 	}

@@ -30,7 +30,7 @@ func (c *Collector) buildADAccessProcesses() error {
 	var err error
 
-	c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector("MSExchange ADAccess Processes", perfdata.InstanceAll, counters)
+	c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector("MSExchange ADAccess Processes", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err)
 	}

@@ -18,7 +18,7 @@ func (c *Collector) buildAutoDiscover() error {
 	var err error
 
-	c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector("MSExchange Autodiscover", perfdata.InstanceAll, counters)
+	c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector("MSExchange Autodiscover", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
 	}

@@ -18,7 +18,7 @@ func (c *Collector) buildAvailabilityService() error {
 	var err error
 
-	c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector("MSExchange Availability Service", perfdata.InstanceAll, counters)
+	c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector("MSExchange Availability Service", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err)
 	}

@@ -32,7 +32,7 @@ func (c *Collector) buildHTTPProxy() error {
 	var err error
 
-	c.perfDataCollectorHttpProxy, err = perfdata.NewCollector("MSExchange HttpProxy", perfdata.InstanceAll, counters)
+	c.perfDataCollectorHttpProxy, err = perfdata.NewCollector("MSExchange HttpProxy", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err)
 	}

@@ -22,7 +22,7 @@ func (c *Collector) buildMapiHttpEmsmdb() error {
 	var err error
 
-	c.perfDataCollectorMapiHttpEmsmdb, err = perfdata.NewCollector("MSExchange MapiHttp Emsmdb", perfdata.InstanceAll, counters)
+	c.perfDataCollectorMapiHttpEmsmdb, err = perfdata.NewCollector("MSExchange MapiHttp Emsmdb", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err)
 	}

@@ -24,7 +24,7 @@ func (c *Collector) buildOWA() error {
 	var err error
 
-	c.perfDataCollectorOWA, err = perfdata.NewCollector("MSExchange OWA", perfdata.InstanceAll, counters)
+	c.perfDataCollectorOWA, err = perfdata.NewCollector("MSExchange OWA", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange OWA collector: %w", err)
 	}

@@ -32,7 +32,7 @@ func (c *Collector) buildRPC() error {
 	var err error
 
-	c.perfDataCollectorRpcClientAccess, err = perfdata.NewCollector("MSExchange RpcClientAccess", perfdata.InstanceAll, counters)
+	c.perfDataCollectorRpcClientAccess, err = perfdata.NewCollector("MSExchange RpcClientAccess", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err)
 	}

@@ -58,7 +58,7 @@ func (c *Collector) buildTransportQueues() error {
 	var err error
 
-	c.perfDataCollectorTransportQueues, err = perfdata.NewCollector("MSExchangeTransport Queues", perfdata.InstanceAll, counters)
+	c.perfDataCollectorTransportQueues, err = perfdata.NewCollector("MSExchangeTransport Queues", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err)
 	}

@@ -30,7 +30,7 @@ func (c *Collector) buildWorkloadManagementWorkloads() error {
 	var err error
 
-	c.perfDataCollectorWorkloadManagementWorkloads, err = perfdata.NewCollector("MSExchange WorkloadManagement Workloads", perfdata.InstanceAll, counters)
+	c.perfDataCollectorWorkloadManagementWorkloads, err = perfdata.NewCollector("MSExchange WorkloadManagement Workloads", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err)
 	}

@@ -114,7 +114,7 @@ const (
 func (c *Collector) buildDataStore() error {
 	var err error
 
-	c.perfDataCollectorDataStore, err = perfdata.NewCollector("Hyper-V DataStore", perfdata.InstanceAll, []string{
+	c.perfDataCollectorDataStore, err = perfdata.NewCollector("Hyper-V DataStore", perfdata.InstancesAll, []string{
 		dataStoreFragmentationRatio,
 		dataStoreSectorSize,
 		dataStoreDataAlignment,

@@ -30,7 +30,7 @@ func (c *Collector) buildDynamicMemoryBalancer() error {
 	var err error
 
 	// https://learn.microsoft.com/en-us/archive/blogs/chrisavis/monitoring-dynamic-memory-in-windows-server-hyper-v-2012
-	c.perfDataCollectorDynamicMemoryBalancer, err = perfdata.NewCollector("Hyper-V Dynamic Memory Balancer", perfdata.InstanceAll, []string{
+	c.perfDataCollectorDynamicMemoryBalancer, err = perfdata.NewCollector("Hyper-V Dynamic Memory Balancer", perfdata.InstancesAll, []string{
 		vmDynamicMemoryBalancerAvailableMemory,
 		vmDynamicMemoryBalancerAvailableMemoryForBalancing,
 		vmDynamicMemoryBalancerAveragePressure,

@@ -42,7 +42,7 @@ const (
 func (c *Collector) buildDynamicMemoryVM() error {
 	var err error
 
-	c.perfDataCollectorDynamicMemoryVM, err = perfdata.NewCollector("Hyper-V Dynamic Memory VM", perfdata.InstanceAll, []string{
+	c.perfDataCollectorDynamicMemoryVM, err = perfdata.NewCollector("Hyper-V Dynamic Memory VM", perfdata.InstancesAll, []string{
 		vmMemoryAddedMemory,
 		vmMemoryCurrentPressure,
 		vmMemoryGuestVisiblePhysicalMemory,

@@ -32,7 +32,7 @@ const (
 func (c *Collector) buildHypervisorLogicalProcessor() error {
 	var err error
 
-	c.perfDataCollectorHypervisorLogicalProcessor, err = perfdata.NewCollector("Hyper-V Hypervisor Logical Processor", perfdata.InstanceAll, []string{
+	c.perfDataCollectorHypervisorLogicalProcessor, err = perfdata.NewCollector("Hyper-V Hypervisor Logical Processor", perfdata.InstancesAll, []string{
 		hypervisorLogicalProcessorGuestRunTimePercent,
 		hypervisorLogicalProcessorHypervisorRunTimePercent,
 		hypervisorLogicalProcessorTotalRunTimePercent,

@@ -35,7 +35,7 @@ const (
 func (c *Collector) buildHypervisorRootVirtualProcessor() error {
 	var err error
 
-	c.perfDataCollectorHypervisorRootVirtualProcessor, err = perfdata.NewCollector("Hyper-V Hypervisor Root Virtual Processor", perfdata.InstanceAll, []string{
+	c.perfDataCollectorHypervisorRootVirtualProcessor, err = perfdata.NewCollector("Hyper-V Hypervisor Root Virtual Processor", perfdata.InstancesAll, []string{
 		hypervisorRootVirtualProcessorGuestIdleTimePercent,
 		hypervisorRootVirtualProcessorGuestRunTimePercent,
 		hypervisorRootVirtualProcessorHypervisorRunTimePercent,

@@ -35,7 +35,7 @@ const (
 func (c *Collector) buildHypervisorVirtualProcessor() error {
 	var err error
 
-	c.perfDataCollectorHypervisorVirtualProcessor, err = perfdata.NewCollector("Hyper-V Hypervisor Virtual Processor", perfdata.InstanceAll, []string{
+	c.perfDataCollectorHypervisorVirtualProcessor, err = perfdata.NewCollector("Hyper-V Hypervisor Virtual Processor", perfdata.InstancesAll, []string{
 		hypervisorVirtualProcessorGuestRunTimePercent,
 		hypervisorVirtualProcessorGuestIdleTimePercent,
 		hypervisorVirtualProcessorHypervisorRunTimePercent,

@@ -33,7 +33,7 @@ const (
 func (c *Collector) buildLegacyNetworkAdapter() error {
 	var err error
 
-	c.perfDataCollectorLegacyNetworkAdapter, err = perfdata.NewCollector("Hyper-V Legacy Network Adapter", perfdata.InstanceAll, []string{
+	c.perfDataCollectorLegacyNetworkAdapter, err = perfdata.NewCollector("Hyper-V Legacy Network Adapter", perfdata.InstancesAll, []string{
 		legacyNetworkAdapterBytesDropped,
 		legacyNetworkAdapterBytesReceived,
 		legacyNetworkAdapterBytesSent,

@@ -51,7 +51,7 @@ func (c *Collector) collectVirtualMachineHealthSummary(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect Hyper-V Virtual Machine Health Summary metrics: %w", err)
 	}
 
-	healthData, ok := data[perfdata.EmptyInstance]
+	healthData, ok := data[perfdata.InstanceEmpty]
 	if !ok {
 		return errors.New("no data returned for Hyper-V Virtual Machine Health Summary")
 	}

@@ -26,7 +26,7 @@ const (
 func (c *Collector) buildVirtualMachineVidPartition() error {
 	var err error
 
-	c.perfDataCollectorVirtualMachineVidPartition, err = perfdata.NewCollector("Hyper-V VM Vid Partition", perfdata.InstanceAll, []string{
+	c.perfDataCollectorVirtualMachineVidPartition, err = perfdata.NewCollector("Hyper-V VM Vid Partition", perfdata.InstancesAll, []string{
 		physicalPagesAllocated,
 		preferredNUMANodeIndex,
 		remotePhysicalPages,

@@ -33,7 +33,7 @@ const (
 func (c *Collector) buildVirtualNetworkAdapter() error {
 	var err error
 
-	c.perfDataCollectorVirtualNetworkAdapter, err = perfdata.NewCollector("Hyper-V Virtual Network Adapter", perfdata.InstanceAll, []string{
+	c.perfDataCollectorVirtualNetworkAdapter, err = perfdata.NewCollector("Hyper-V Virtual Network Adapter", perfdata.InstancesAll, []string{
 		virtualNetworkAdapterBytesReceived,
 		virtualNetworkAdapterBytesSent,
 		virtualNetworkAdapterDroppedPacketsIncoming,

@@ -148,7 +148,7 @@ const (
 func (c *Collector) buildVirtualNetworkAdapterDropReasons() error {
 	var err error
 
-	c.perfDataCollectorVirtualNetworkAdapterDropReasons, err = perfdata.NewCollector("Hyper-V Virtual Network Adapter Drop Reasons", perfdata.InstanceAll, []string{
+	c.perfDataCollectorVirtualNetworkAdapterDropReasons, err = perfdata.NewCollector("Hyper-V Virtual Network Adapter Drop Reasons", perfdata.InstancesAll, []string{
 		virtualNetworkAdapterDropReasonsOutgoingNativeFwdingReq,
 		virtualNetworkAdapterDropReasonsIncomingNativeFwdingReq,
 		virtualNetworkAdapterDropReasonsOutgoingMTUMismatch,

@@ -55,7 +55,7 @@ const (
 func (c *Collector) buildVirtualSMB() error {
 	var err error
 
-	c.perfDataCollectorVirtualSMB, err = perfdata.NewCollector("Hyper-V Virtual SMB", perfdata.InstanceAll, []string{
+	c.perfDataCollectorVirtualSMB, err = perfdata.NewCollector("Hyper-V Virtual SMB", perfdata.InstancesAll, []string{
 		virtualSMBDirectMappedSections,
 		virtualSMBDirectMappedPages,
 		virtualSMBWriteBytesRDMA,

@@ -45,7 +45,7 @@ const (
 func (c *Collector) buildVirtualStorageDevice() error {
 	var err error
 
-	c.perfDataCollectorVirtualStorageDevice, err = perfdata.NewCollector("Hyper-V Virtual Storage Device", perfdata.InstanceAll, []string{
+	c.perfDataCollectorVirtualStorageDevice, err = perfdata.NewCollector("Hyper-V Virtual Storage Device", perfdata.InstancesAll, []string{
 		virtualStorageDeviceErrorCount,
 		virtualStorageDeviceQueueLength,
 		virtualStorageDeviceReadBytes,

@@ -62,7 +62,7 @@ const (
 func (c *Collector) buildVirtualSwitch() error {
 	var err error
 
-	c.perfDataCollectorVirtualSwitch, err = perfdata.NewCollector("Hyper-V Virtual Switch", perfdata.InstanceAll, []string{
+	c.perfDataCollectorVirtualSwitch, err = perfdata.NewCollector("Hyper-V Virtual Switch", perfdata.InstancesAll, []string{
 		virtualSwitchBroadcastPacketsReceived,
 		virtualSwitchBroadcastPacketsSent,
 		virtualSwitchBytes,

@@ -57,7 +57,7 @@ var applicationStates = map[uint32]string{
 func (c *Collector) buildAppPoolWAS() error {
 	var err error
 
-	c.perfDataCollectorAppPoolWAS, err = perfdata.NewCollector("APP_POOL_WAS", perfdata.InstanceAll, []string{
+	c.perfDataCollectorAppPoolWAS, err = perfdata.NewCollector("APP_POOL_WAS", perfdata.InstancesAll, []string{
 		CurrentApplicationPoolState,
 		CurrentApplicationPoolUptime,
 		CurrentWorkerProcesses,

@@ -182,7 +182,7 @@ func (c *Collector) buildW3SVCW3WP() error {
 	var err error
 
-	c.perfDataCollectorW3SVCW3WP, err = perfdata.NewCollector("W3SVC_W3WP", perfdata.InstanceAll, counters)
+	c.perfDataCollectorW3SVCW3WP, err = perfdata.NewCollector("W3SVC_W3WP", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err)
 	}

@@ -432,8 +432,7 @@ func (c *Collector) collectW3SVCW3WP(ch chan<- prometheus.Metric) error {
 		pid := workerProcessNameExtractor.ReplaceAllString(name, "$1")
 
 		name := workerProcessNameExtractor.ReplaceAllString(name, "$2")
-		if name == "" || name == "_Total" ||
-			c.config.AppExclude.MatchString(name) ||
+		if name == "" || c.config.AppExclude.MatchString(name) ||
 			!c.config.AppInclude.MatchString(name) {
 			continue
 		}

@@ -80,7 +80,7 @@ const (
 func (c *Collector) buildWebService() error {
 	var err error
 
-	c.perfDataCollectorWebService, err = perfdata.NewCollector("Web Service", perfdata.InstanceAll, []string{
+	c.perfDataCollectorWebService, err = perfdata.NewCollector("Web Service", perfdata.InstancesAll, []string{
 		CurrentAnonymousUsers,
 		CurrentBlockedAsyncIORequests,
 		CurrentCGIRequests,

@@ -87,7 +87,7 @@ const (
 func (c *Collector) buildWebServiceCache() error {
 	var err error
 
-	c.perfDataCollectorWebService, err = perfdata.NewCollector("Web Service Cache", perfdata.InstanceAll, []string{
+	c.perfDataCollectorWebService, err = perfdata.NewCollector("Web Service Cache", perfdata.InstancesAll, []string{
 		ServiceCacheActiveFlushedEntries,
 		ServiceCacheCurrentFileCacheMemoryUsage,
 		ServiceCacheMaximumFileCacheMemoryUsage,

@@ -136,7 +136,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("LogicalDisk", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("LogicalDisk", perfdata.InstancesAll, []string{
 		currentDiskQueueLength,
 		avgDiskReadQueueLength,
 		avgDiskWriteQueueLength,

@@ -134,7 +134,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("Memory", perfdata.InstanceAll, counters)
+	c.perfDataCollector, err = perfdata.NewCollector("Memory", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create Memory collector: %w", err)
 	}

@@ -414,7 +414,7 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect Memory metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 
 	if !ok {
 		return errors.New("perflib query for Memory returned empty result set")

@@ -443,7 +443,7 @@ func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance string) error {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"), err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"))
 	}

@@ -54,7 +54,7 @@ func (c *Collector) buildAvailabilityReplica() error {
 	}
 
 	for sqlInstance := range c.mssqlInstances {
-		c.availabilityReplicaPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), perfdata.InstanceAll, counters)
+		c.availabilityReplicaPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), perfdata.InstancesAll, counters)
 		if err != nil {
 			return fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance, err)
 		}

@@ -170,7 +170,7 @@ func (c *Collector) buildDatabases() error {
 	}
 
 	for sqlInstance := range c.mssqlInstances {
-		c.databasesPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Databases"), perfdata.InstanceAll, counters)
+		c.databasesPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Databases"), perfdata.InstancesAll, counters)
 		if err != nil {
 			return fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance, err)
 		}

@@ -98,7 +98,7 @@ func (c *Collector) buildDatabaseReplica() error {
 	}
 
 	for sqlInstance := range c.mssqlInstances {
-		c.dbReplicaPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), perfdata.InstanceAll, counters)
+		c.dbReplicaPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), perfdata.InstancesAll, counters)
 		if err != nil {
 			return fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance, err)
 		}

@@ -263,7 +263,7 @@ func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance string) error {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"))
 	}

@@ -51,7 +51,7 @@ func (c *Collector) buildLocks() error {
 	}
 
 	for sqlInstance := range c.mssqlInstances {
-		c.locksPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Locks"), perfdata.InstanceAll, counters)
+		c.locksPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Locks"), perfdata.InstancesAll, counters)
 		if err != nil {
 			return fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance, err)
 		}

@@ -86,7 +86,7 @@ func (c *Collector) buildMemoryManager() error {
 	}
 
 	for sqlInstance := range c.mssqlInstances {
-		c.memMgrPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), perfdata.InstanceAll, counters)
+		c.memMgrPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), perfdata.InstancesAll, counters)
 		if err != nil {
 			return fmt.Errorf("failed to create Memory Manager collector for instance %s: %w", sqlInstance, err)
 		}

@@ -226,7 +226,7 @@ func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance string) error {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"))
 	}

@@ -30,7 +30,7 @@ func (c *Collector) buildSQLErrors() error {
 	}
 
 	for sqlInstance := range c.mssqlInstances {
-		c.genStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), perfdata.InstanceAll, counters)
+		c.genStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), perfdata.InstancesAll, counters)
 		if err != nil {
 			return fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance, err)
 		}

@@ -145,7 +145,7 @@ func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance string) error {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"))
 	}

@@ -165,7 +165,7 @@ func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance string) error {
 		return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"))
 	}

@@ -62,7 +62,7 @@ func (c *Collector) buildWaitStats() error {
 	}
 
 	for sqlInstance := range c.mssqlInstances {
-		c.waitStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), perfdata.InstanceAll, counters)
+		c.waitStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), perfdata.InstancesAll, counters)
 		if err != nil {
 			return fmt.Errorf("failed to create Wait Statistics collector for instance %s: %w", sqlInstance, err)
 		}

@@ -144,7 +144,7 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("Network Interface", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("Network Interface", perfdata.InstancesAll, []string{
 		bytesReceivedPerSec,
 		bytesSentPerSec,
 		bytesTotalPerSec,

@@ -298,7 +298,7 @@ func (c *Collector) collectAccept(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect NPS Authentication Server metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return errors.New("perflib query for NPS Authentication Server returned empty result set")
 	}

@@ -390,7 +390,7 @@ func (c *Collector) collectAccounting(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect NPS Accounting Server metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return errors.New("perflib query for NPS Accounting Server returned empty result set")
 	}

@@ -61,7 +61,7 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("Paging File", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("Paging File", perfdata.InstancesAll, []string{
 		usage,
 	})
 	if err != nil {

@@ -118,7 +118,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 	for instance, counters := range data {
 		for counter, value := range counters {
 			var labels prometheus.Labels
-			if instance != perfdata.EmptyInstance {
+			if instance != perfdata.InstanceEmpty {
 				labels = prometheus.Labels{object.InstanceLabel: instance}
 			}

@@ -129,7 +129,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("PhysicalDisk", perfdata.InstanceAll, counters)
+	c.perfDataCollector, err = perfdata.NewCollector("PhysicalDisk", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
 	}

@@ -180,11 +180,11 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
 		workingSet,
 	}
 
-	c.perfDataCollector, err = perfdata.NewCollector("Process V2", perfdata.InstanceAll, counters)
+	c.perfDataCollector, err = perfdata.NewCollector("Process V2", perfdata.InstancesAll, counters)
 	if errors.Is(err, perfdata.NewPdhError(perfdata.PdhCstatusNoObject)) {
 		counters[0] = idProcess
 
-		c.perfDataCollector, err = perfdata.NewCollector("Process", perfdata.InstanceAll, counters)
+		c.perfDataCollector, err = perfdata.NewCollector("Process", perfdata.InstancesAll, counters)
 	}
 
 	if err != nil {

@@ -324,9 +324,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 		// Duplicate processes are suffixed #, and an index number. Remove those.
 		name, _, _ = strings.Cut(name, "#")
 
-		if name == "_Total" ||
-			c.config.ProcessExclude.MatchString(name) ||
-			!c.config.ProcessInclude.MatchString(name) {
+		if c.config.ProcessExclude.MatchString(name) || !c.config.ProcessInclude.MatchString(name) {
 			continue
 		}

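The `_Total` guards dropped here and in the W3SVC_W3WP hunk above become redundant because the shared perfdata collector now filters aggregate instances itself: `NewCollector` records whether `_Total` was requested, and `Collect` skips it otherwise (see the perfdata hunks near the end of this diff). A condensed sketch of that filter, reusing names from those hunks (fragment, not the full loop):

// Inside perfdata's Collect loop: skip the aggregate "_Total" pseudo-instance
// unless the caller asked for it by passing perfdata.InstancesTotal to NewCollector.
instanceName := windows.UTF16PtrToString(item.SzName)
if strings.HasSuffix(instanceName, InstanceTotal) && !c.totalCounterRequested {
	continue
}
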
@@ -88,7 +88,7 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(*slog.Logger, *mi.Session) error {
 	var err error
 
-	c.perfDataCollectorNetwork, err = perfdata.NewCollector("RemoteFX Network", perfdata.InstanceAll, []string{
+	c.perfDataCollectorNetwork, err = perfdata.NewCollector("RemoteFX Network", perfdata.InstancesAll, []string{
 		BaseTCPRTT,
 		BaseUDPRTT,
 		CurrentTCPBandwidth,

@@ -107,7 +107,7 @@ func (c *Collector) Build(*slog.Logger, *mi.Session) error {
 		return fmt.Errorf("failed to create RemoteFX Network collector: %w", err)
 	}
 
-	c.perfDataCollectorGraphics, err = perfdata.NewCollector("RemoteFX Graphics", perfdata.InstanceAll, []string{
+	c.perfDataCollectorGraphics, err = perfdata.NewCollector("RemoteFX Graphics", perfdata.InstancesAll, []string{
 		AverageEncodingTime,
 		FrameQuality,
 		FramesSkippedPerSecondInsufficientClientResources,

@@ -5,4 +5,10 @@ package smb
 const (
 	currentOpenFileCount = "Current Open File Count"
 	treeConnectCount     = "Tree Connect Count"
+	receivedBytes        = "Received Bytes/sec"
+	writeRequests        = "Write Requests/sec"
+	readRequests         = "Read Requests/sec"
+	metadataRequests     = "Metadata Requests/sec"
+	sentBytes            = "Sent Bytes/sec"
+	filesOpened          = "Files Opened/sec"
 )

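These constants are counter names under the Windows "SMB Server Shares" performance object; each share is one instance of that object. Under the path scheme in formatCounterPath at the end of this diff, a hypothetical share named `backup` would be queried via PDH paths such as:

\SMB Server Shares(backup)\Current Open File Count
\SMB Server Shares(backup)\Read Requests/sec
\SMB Server Shares(backup)\Sent Bytes/sec
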
@@ -3,7 +3,6 @@
 package smb
 
 import (
-	"errors"
 	"fmt"
 	"log/slog"

@@ -27,6 +26,12 @@ type Collector struct {
 
 	treeConnectCount     *prometheus.Desc
 	currentOpenFileCount *prometheus.Desc
+	receivedBytes        *prometheus.Desc
+	writeRequests        *prometheus.Desc
+	readRequests         *prometheus.Desc
+	metadataRequests     *prometheus.Desc
+	sentBytes            *prometheus.Desc
+	filesOpened          *prometheus.Desc
 }
 
 func New(config *Config) *Collector {

@@ -58,9 +63,15 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("SMB Server Shares", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("SMB Server Shares", perfdata.InstancesAll, []string{
 		currentOpenFileCount,
 		treeConnectCount,
+		receivedBytes,
+		writeRequests,
+		readRequests,
+		metadataRequests,
+		sentBytes,
+		filesOpened,
 	})
 	if err != nil {
 		return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)

@@ -68,14 +79,50 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 
 	c.currentOpenFileCount = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "server_shares_current_open_file_count"),
-		"Current total count open files on the SMB Server",
-		nil,
+		"Current total count open files on the SMB Server Share",
+		[]string{"share"},
 		nil,
 	)
 	c.treeConnectCount = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "server_shares_tree_connect_count"),
-		"Count of user connections to the SMB Server",
-		nil,
+		"Count of user connections to the SMB Server Share",
+		[]string{"share"},
 		nil,
 	)
+	c.receivedBytes = prometheus.NewDesc(
+		prometheus.BuildFQName(types.Namespace, Name, "server_shares_received_bytes_total"),
+		"Received bytes on the SMB Server Share",
+		[]string{"share"},
+		nil,
+	)
+	c.writeRequests = prometheus.NewDesc(
+		prometheus.BuildFQName(types.Namespace, Name, "server_shares_write_requests_count"),
+		"Write requests on the SMB Server Share",
+		[]string{"share"},
+		nil,
+	)
+	c.readRequests = prometheus.NewDesc(
+		prometheus.BuildFQName(types.Namespace, Name, "server_shares_read_requests_count"),
+		"Read requests on the SMB Server Share",
+		[]string{"share"},
+		nil,
+	)
+	c.metadataRequests = prometheus.NewDesc(
+		prometheus.BuildFQName(types.Namespace, Name, "server_shares_metadata_requests_count"),
+		"Metadata requests on the SMB Server Share",
+		[]string{"share"},
+		nil,
+	)
+	c.sentBytes = prometheus.NewDesc(
+		prometheus.BuildFQName(types.Namespace, Name, "server_shares_sent_bytes_total"),
+		"Sent bytes on the SMB Server Share",
+		[]string{"share"},
+		nil,
+	)
+	c.filesOpened = prometheus.NewDesc(
+		prometheus.BuildFQName(types.Namespace, Name, "server_shares_files_opened_count"),
+		"Files opened on the SMB Server Share",
+		[]string{"share"},
+		nil,
+	)

@@ -89,22 +136,63 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect SMB Server Shares metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
-	if !ok {
-		return errors.New("query for SMB Server Shares returned empty result set")
-	}
-
-	ch <- prometheus.MustNewConstMetric(
-		c.currentOpenFileCount,
-		prometheus.CounterValue,
-		data[currentOpenFileCount].FirstValue,
-	)
-
-	ch <- prometheus.MustNewConstMetric(
-		c.treeConnectCount,
-		prometheus.CounterValue,
-		data[treeConnectCount].FirstValue,
-	)
+	for share, data := range perfData {
+		ch <- prometheus.MustNewConstMetric(
+			c.currentOpenFileCount,
+			prometheus.CounterValue,
+			data[currentOpenFileCount].FirstValue,
+			share,
+		)
+
+		ch <- prometheus.MustNewConstMetric(
+			c.treeConnectCount,
+			prometheus.CounterValue,
+			data[treeConnectCount].FirstValue,
+			share,
+		)
+
+		ch <- prometheus.MustNewConstMetric(
+			c.receivedBytes,
+			prometheus.CounterValue,
+			data[receivedBytes].FirstValue,
+			share,
+		)
+
+		ch <- prometheus.MustNewConstMetric(
+			c.writeRequests,
+			prometheus.GaugeValue,
+			data[writeRequests].FirstValue,
+			share,
+		)
+
+		ch <- prometheus.MustNewConstMetric(
+			c.readRequests,
+			prometheus.GaugeValue,
+			data[readRequests].FirstValue,
+			share,
+		)
+
+		ch <- prometheus.MustNewConstMetric(
+			c.metadataRequests,
+			prometheus.GaugeValue,
+			data[metadataRequests].FirstValue,
+			share,
+		)
+
+		ch <- prometheus.MustNewConstMetric(
+			c.sentBytes,
+			prometheus.CounterValue,
+			data[sentBytes].FirstValue,
+			share,
+		)
+
+		ch <- prometheus.MustNewConstMetric(
+			c.filesOpened,
+			prometheus.GaugeValue,
+			data[filesOpened].FirstValue,
+			share,
+		)
+	}
 
 	return nil
 }

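With the per-share loop above, each share name becomes a `share` label value instead of the old single aggregate series. A hypothetical scrape of the extended collector (share name and values invented for illustration) would expose series like:

windows_smb_server_shares_current_open_file_count{share="backup"} 3
windows_smb_server_shares_tree_connect_count{share="backup"} 2
windows_smb_server_shares_received_bytes_total{share="backup"} 1.048576e+06
windows_smb_server_shares_read_requests_count{share="backup"} 42
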
@@ -78,7 +78,7 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("SMB Client Shares", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("SMB Client Shares", perfdata.InstancesAll, []string{
 		AvgDataQueueLength,
 		AvgReadQueueLength,
 		AvgSecPerRead,

@@ -144,7 +144,7 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollector, err = perfdata.NewCollector("SMTP Server", perfdata.InstanceAll, []string{
+	c.perfDataCollector, err = perfdata.NewCollector("SMTP Server", perfdata.InstancesAll, []string{
 		badmailedMessagesBadPickupFileTotal,
 		badmailedMessagesGeneralFailureTotal,
 		badmailedMessagesHopCountExceededTotal,

@@ -139,7 +139,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect System metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return errors.New("query for System returned empty result set")
 	}

@@ -215,22 +215,22 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect TCPv4 metrics: %w", err)
 	}
 
-	if _, ok := data[perfdata.EmptyInstance]; !ok {
+	if _, ok := data[perfdata.InstanceEmpty]; !ok {
 		return errors.New("no data for TCPv4")
 	}
 
-	c.writeTCPCounters(ch, data[perfdata.EmptyInstance], []string{"ipv4"})
+	c.writeTCPCounters(ch, data[perfdata.InstanceEmpty], []string{"ipv4"})
 
 	data, err = c.perfDataCollector6.Collect()
 	if err != nil {
 		return fmt.Errorf("failed to collect TCPv6 metrics: %w", err)
 	}
 
-	if _, ok := data[perfdata.EmptyInstance]; !ok {
+	if _, ok := data[perfdata.InstanceEmpty]; !ok {
 		return errors.New("no data for TCPv6")
 	}
 
-	c.writeTCPCounters(ch, data[perfdata.EmptyInstance], []string{"ipv6"})
+	c.writeTCPCounters(ch, data[perfdata.InstanceEmpty], []string{"ipv6"})
 
 	return nil
 }

@@ -142,7 +142,7 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
 	var err error
 
-	c.perfDataCollectorTerminalServicesSession, err = perfdata.NewCollector("Terminal Services Session", perfdata.InstanceAll, counters)
+	c.perfDataCollectorTerminalServicesSession, err = perfdata.NewCollector("Terminal Services Session", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create Terminal Services Session collector: %w", err)
 	}

@@ -158,7 +158,7 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
 	var err error
 
-	c.perfDataCollectorBroker, err = perfdata.NewCollector("Remote Desktop Connection Broker Counterset", perfdata.InstanceAll, counters)
+	c.perfDataCollectorBroker, err = perfdata.NewCollector("Remote Desktop Connection Broker Counterset", perfdata.InstancesAll, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err)
 	}

@@ -413,7 +413,7 @@ func (c *Collector) collectCollectionBrokerPerformanceCounter(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect Remote Desktop Connection Broker Counterset metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return errors.New("query for Remote Desktop Connection Broker Counterset returned empty result set")
 	}

@@ -227,7 +227,7 @@ func (c *Collector) collectNTP(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect Windows Time Service metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return errors.New("query for Windows Time Service returned empty result set")
 	}

@@ -124,22 +124,22 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect UDPv4 metrics: %w", err)
 	}
 
-	if _, ok := data[perfdata.EmptyInstance]; !ok {
+	if _, ok := data[perfdata.InstanceEmpty]; !ok {
 		return errors.New("no data for UDPv4")
 	}
 
-	c.writeUDPCounters(ch, data[perfdata.EmptyInstance], []string{"ipv4"})
+	c.writeUDPCounters(ch, data[perfdata.InstanceEmpty], []string{"ipv4"})
 
 	data, err = c.perfDataCollector6.Collect()
 	if err != nil {
 		return fmt.Errorf("failed to collect UDPv6 metrics: %w", err)
 	}
 
-	if _, ok := data[perfdata.EmptyInstance]; !ok {
+	if _, ok := data[perfdata.InstanceEmpty]; !ok {
 		return errors.New("no data for UDPv6")
 	}
 
-	c.writeUDPCounters(ch, data[perfdata.EmptyInstance], []string{"ipv6"})
+	c.writeUDPCounters(ch, data[perfdata.InstanceEmpty], []string{"ipv6"})
 
 	return nil
 }

@@ -79,7 +79,7 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	var err error
 
-	c.perfDataCollectorCPU, err = perfdata.NewCollector("VM Processor", perfdata.InstanceTotal, []string{
+	c.perfDataCollectorCPU, err = perfdata.NewCollector("VM Processor", perfdata.InstancesTotal, []string{
 		cpuLimitMHz,
 		cpuReservationMHz,
 		cpuShares,

@@ -251,7 +251,7 @@ func (c *Collector) collectMem(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect VM Memory metrics: %w", err)
 	}
 
-	data, ok := perfData[perfdata.EmptyInstance]
+	data, ok := perfData[perfdata.InstanceEmpty]
 	if !ok {
 		return errors.New("query for VM Memory returned empty result set")
 	}

@@ -337,7 +337,7 @@ func (c *Collector) collectCpu(ch chan<- prometheus.Metric) error {
 		return fmt.Errorf("failed to collect VM CPU metrics: %w", err)
 	}
 
-	data, ok := perfData["_Total"]
+	data, ok := perfData[perfdata.InstanceTotal]
 	if !ok {
 		return errors.New("query for VM CPU returned empty result set")
 	}

@@ -15,8 +15,8 @@ import (
 )
 
 var (
-	InstanceAll   = []string{"*"}
-	InstanceTotal = []string{"_Total"}
+	InstancesAll   = []string{"*"}
+	InstancesTotal = []string{InstanceTotal}
 )
 
 type Collector struct {

@@ -43,14 +43,14 @@ func NewCollector(object string, instances []string, counters []string) (*Collector, error) {
 	}
 
 	if len(instances) == 0 {
-		instances = []string{EmptyInstance}
+		instances = []string{InstanceEmpty}
 	}
 
 	collector := &Collector{
 		object:                object,
 		counters:              make(map[string]Counter, len(counters)),
 		handle:                handle,
-		totalCounterRequested: slices.Contains(instances, "_Total"),
+		totalCounterRequested: slices.Contains(instances, InstanceTotal),
 		mu:                    sync.RWMutex{},
 	}

@@ -186,12 +186,12 @@ func (c *Collector) Collect() (map[string]map[string]CounterValues, error) {
 	for _, item := range items {
 		if item.RawValue.CStatus == PdhCstatusValidData || item.RawValue.CStatus == PdhCstatusNewData {
 			instanceName := windows.UTF16PtrToString(item.SzName)
-			if strings.HasSuffix(instanceName, "_Total") && !c.totalCounterRequested {
+			if strings.HasSuffix(instanceName, InstanceTotal) && !c.totalCounterRequested {
 				continue
 			}
 
 			if instanceName == "" || instanceName == "*" {
-				instanceName = EmptyInstance
+				instanceName = InstanceEmpty
 			}
 
 			if _, ok := data[instanceName]; !ok {

@@ -239,7 +239,7 @@ func (c *Collector) Close() {
 func formatCounterPath(object, instance, counterName string) string {
 	var counterPath string
 
-	if instance == EmptyInstance {
+	if instance == InstanceEmpty {
 		counterPath = fmt.Sprintf(`\%s\%s`, object, counterName)
 	} else {
 		counterPath = fmt.Sprintf(`\%s(%s)\%s`, object, instance, counterName)

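For reference, a self-contained sketch of the path scheme formatCounterPath implements, using the InstanceEmpty sentinel defined in the final hunk (the example objects are illustrative):

package main

import "fmt"

const instanceEmpty = "------" // mirrors perfdata.InstanceEmpty

// Instance-less objects map to \Object\Counter; instanced objects map to
// \Object(Instance)\Counter, matching formatCounterPath above.
func formatCounterPath(object, instance, counterName string) string {
	if instance == instanceEmpty {
		return fmt.Sprintf(`\%s\%s`, object, counterName)
	}

	return fmt.Sprintf(`\%s(%s)\%s`, object, instance, counterName)
}

func main() {
	fmt.Println(formatCounterPath("Memory", instanceEmpty, "Available Bytes"))
	// \Memory\Available Bytes
	fmt.Println(formatCounterPath("SMB Server Shares", "backup", "Read Requests/sec"))
	// \SMB Server Shares(backup)\Read Requests/sec
}
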
@@ -4,7 +4,10 @@ package perfdata
 
 import "github.com/prometheus/client_golang/prometheus"
 
-const EmptyInstance = "------"
+const (
+	InstanceEmpty = "------"
+	InstanceTotal = "_Total"
+)
 
 type CounterValues struct {
 	Type prometheus.ValueType

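Taken together, the perfdata hunks leave the package with two string constants for single instances and two slice variables for instance filters. A compact summary of the resulting API surface (sketch, assuming no call sites beyond those shown in this diff):

// Map keys / comparisons: a single instance name.
const (
	InstanceEmpty = "------" // sentinel key for counter objects without instances
	InstanceTotal = "_Total" // Windows' aggregate pseudo-instance
)

// NewCollector filters: which instances to query.
var (
	InstancesAll   = []string{"*"}           // every instance
	InstancesTotal = []string{InstanceTotal} // only the aggregate
)

// Typical lookups after Collect():
//   data[InstanceEmpty] for instance-less objects such as Memory
//   data[InstanceTotal] when InstancesTotal was requested, as in the VM Processor hunk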