mirror of
https://github.com/prometheus-community/windows_exporter.git
synced 2026-02-08 22:16:38 +00:00
Compare commits
71 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a6f3b33928 | ||
|
|
8ef215cc7e | ||
|
|
a7b5cf7aa6 | ||
|
|
719ccd4f7f | ||
|
|
7ab8c7dde4 | ||
|
|
eb002eb667 | ||
|
|
a1638cdf4c | ||
|
|
091406877a | ||
|
|
84970ac086 | ||
|
|
d86f318010 | ||
|
|
853d615673 | ||
|
|
cd9a740e2b | ||
|
|
c70e7674a5 | ||
|
|
d3e3835c29 | ||
|
|
592c8a8d69 | ||
|
|
6f6a479535 | ||
|
|
d01c66986c | ||
|
|
823ffb7597 | ||
|
|
a90f9cda0f | ||
|
|
31d4c28124 | ||
|
|
e880889f07 | ||
|
|
a283608812 | ||
|
|
8251ddd176 | ||
|
|
4f0a3a89ab | ||
|
|
27cc1072fe | ||
|
|
eb9cf56dee | ||
|
|
3c20887433 | ||
|
|
37d1c4e958 | ||
|
|
33b6e17b2d | ||
|
|
1a9d4afdd6 | ||
|
|
9e198c55a4 | ||
|
|
b309a05bde | ||
|
|
123a055242 | ||
|
|
9308108284 | ||
|
|
0ecf3cd792 | ||
|
|
801444b35b | ||
|
|
f4ab322e5b | ||
|
|
72de199528 | ||
|
|
304972580d | ||
|
|
6322bb124f | ||
|
|
cb6a91b705 | ||
|
|
4d9fb1be72 | ||
|
|
27e26037e3 | ||
|
|
e09497116f | ||
|
|
3099e10555 | ||
|
|
3900504504 | ||
|
|
2c5e30d920 | ||
|
|
b348c245e8 | ||
|
|
578bcc4959 | ||
|
|
31a30474f1 | ||
|
|
ce1005add8 | ||
|
|
6107a59306 | ||
|
|
47656b16bd | ||
|
|
8fc47669be | ||
|
|
1a67ca54b6 | ||
|
|
c73f52338d | ||
|
|
c5f23b4e64 | ||
|
|
411954cf9d | ||
|
|
56be7c63d5 | ||
|
|
6ffe504f7e | ||
|
|
daa6f3d111 | ||
|
|
85fdfb44b8 | ||
|
|
33879449a2 | ||
|
|
462a136673 | ||
|
|
d5e39892cf | ||
|
|
ec0d863c29 | ||
|
|
afc3655a41 | ||
|
|
e25e96a62e | ||
|
|
23d92cfcae | ||
|
|
1258703f23 | ||
|
|
8841091f9c |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -3,3 +3,5 @@ VERSION
|
||||
*.swp
|
||||
*.un~
|
||||
output/
|
||||
.vscode
|
||||
.idea
|
||||
@@ -12,10 +12,12 @@ Name | Description | Enabled by default
|
||||
[ad](docs/collector.ad.md) | Active Directory Domain Services |
|
||||
[cpu](docs/collector.cpu.md) | CPU usage | ✓
|
||||
[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | ✓
|
||||
[container](docs/collector.container.md) | Container metrics |
|
||||
[dns](docs/collector.dns.md) | DNS Server |
|
||||
[hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
|
||||
[iis](docs/collector.iis.md) | IIS sites and applications |
|
||||
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓
|
||||
[logon](docs/collector.logon.md) | User logon sessions |
|
||||
[memory](docs/collector.memory.md) | Memory usage metrics |
|
||||
[msmq](docs/collector.msmq.md) | MSMQ queues |
|
||||
[mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
|
||||
@@ -33,6 +35,7 @@ Name | Description | Enabled by default
|
||||
[service](docs/collector.service.md) | Service state metrics | ✓
|
||||
[system](docs/collector.system.md) | System calls | ✓
|
||||
[tcp](docs/collector.tcp.md) | TCP connections |
|
||||
[thermalzone](docs/collector.thermalzone.md) | Thermal information
|
||||
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | ✓
|
||||
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |
|
||||
|
||||
|
||||
10
appveyor.yml
10
appveyor.yml
@@ -11,6 +11,7 @@ clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter
|
||||
|
||||
install:
|
||||
- set PATH=%GOPATH%\bin;%PATH%
|
||||
- set PATH=%PATH%;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin
|
||||
- go get -u github.com/prometheus/promu
|
||||
- go get -u github.com/alecthomas/gometalinter && gometalinter --install
|
||||
- choco install gitversion.portable make -y
|
||||
@@ -39,12 +40,13 @@ after_build:
|
||||
return
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
$BuildVersion = Get-Content VERSION
|
||||
# The MSI version is not semver compliant, so just take the numerical parts
|
||||
$Version = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
|
||||
$MSIVersion = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
|
||||
foreach($Arch in "amd64","386") {
|
||||
Write-Verbose "Building wmi_exporter $Version msi for $Arch"
|
||||
.\installer\build.ps1 -PathToExecutable .\output\$Arch\wmi_exporter-$Version-$Arch.exe -Version $Version -Arch "$Arch"
|
||||
Move-Item installer\Output\wmi_exporter-$Version-$Arch.msi output\$Arch\
|
||||
Write-Verbose "Building wmi_exporter $MSIVersion msi for $Arch"
|
||||
.\installer\build.ps1 -PathToExecutable .\output\$Arch\wmi_exporter-$BuildVersion-$Arch.exe -Version $MSIVersion -Arch "$Arch"
|
||||
Move-Item installer\Output\wmi_exporter-$MSIVersion-$Arch.msi output\$Arch\
|
||||
}
|
||||
- promu checksum output\
|
||||
|
||||
|
||||
@@ -454,7 +454,7 @@ func NewADCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ADCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *ADCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting ad metrics:", desc, err)
|
||||
return err
|
||||
|
||||
71
collector/collector.go
Normal file
71
collector/collector.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/leoluk/perflib_exporter/perflib"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
"golang.org/x/sys/windows/registry"
|
||||
)
|
||||
|
||||
// ...
|
||||
const (
|
||||
// TODO: Make package-local
|
||||
Namespace = "wmi"
|
||||
|
||||
// Conversion factors
|
||||
ticksToSecondsScaleFactor = 1 / 1e7
|
||||
windowsEpoch = 116444736000000000
|
||||
)
|
||||
|
||||
// getWindowsVersion reads the version number of the OS from the Registry
|
||||
// See https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
|
||||
func getWindowsVersion() float64 {
|
||||
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
|
||||
if err != nil {
|
||||
log.Warn("Couldn't open registry", err)
|
||||
return 0
|
||||
}
|
||||
defer func() {
|
||||
err = k.Close()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to close registry key: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
currentv, _, err := k.GetStringValue("CurrentVersion")
|
||||
if err != nil {
|
||||
log.Warn("Couldn't open registry to determine current Windows version:", err)
|
||||
return 0
|
||||
}
|
||||
|
||||
currentv_flt, err := strconv.ParseFloat(currentv, 64)
|
||||
|
||||
log.Debugf("Detected Windows version %f\n", currentv_flt)
|
||||
|
||||
return currentv_flt
|
||||
}
|
||||
|
||||
// Factories ...
|
||||
var Factories = make(map[string]func() (Collector, error))
|
||||
|
||||
// Collector is the interface a collector has to implement.
|
||||
type Collector interface {
|
||||
// Get new metrics and expose them via prometheus registry.
|
||||
Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (err error)
|
||||
}
|
||||
|
||||
type ScrapeContext struct {
|
||||
perfObjects map[string]*perflib.PerfObject
|
||||
}
|
||||
|
||||
// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape
|
||||
func PrepareScrapeContext() (*ScrapeContext, error) {
|
||||
objs, err := getPerflibSnapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ScrapeContext{objs}, nil
|
||||
}
|
||||
282
collector/container.go
Normal file
282
collector/container.go
Normal file
@@ -0,0 +1,282 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["container"] = NewContainerMetricsCollector
|
||||
}
|
||||
|
||||
// A ContainerMetricsCollector is a Prometheus collector for containers metrics
|
||||
type ContainerMetricsCollector struct {
|
||||
// Presence
|
||||
ContainerAvailable *prometheus.Desc
|
||||
|
||||
// Number of containers
|
||||
ContainersCount *prometheus.Desc
|
||||
// memory
|
||||
UsageCommitBytes *prometheus.Desc
|
||||
UsageCommitPeakBytes *prometheus.Desc
|
||||
UsagePrivateWorkingSetBytes *prometheus.Desc
|
||||
|
||||
// CPU
|
||||
RuntimeTotal *prometheus.Desc
|
||||
RuntimeUser *prometheus.Desc
|
||||
RuntimeKernel *prometheus.Desc
|
||||
|
||||
// Network
|
||||
BytesReceived *prometheus.Desc
|
||||
BytesSent *prometheus.Desc
|
||||
PacketsReceived *prometheus.Desc
|
||||
PacketsSent *prometheus.Desc
|
||||
DroppedPacketsIncoming *prometheus.Desc
|
||||
DroppedPacketsOutgoing *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewContainerMetricsCollector constructs a new ContainerMetricsCollector
|
||||
func NewContainerMetricsCollector() (Collector, error) {
|
||||
const subsystem = "container"
|
||||
return &ContainerMetricsCollector{
|
||||
ContainerAvailable: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "available"),
|
||||
"Available",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
ContainersCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "count"),
|
||||
"Number of containers",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
UsageCommitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_bytes"),
|
||||
"Memory Usage Commit Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsageCommitPeakBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_peak_bytes"),
|
||||
"Memory Usage Commit Peak Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsagePrivateWorkingSetBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_private_working_set_bytes"),
|
||||
"Memory Usage Private Working Set Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_total"),
|
||||
"Total Run time in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeUser: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_usermode"),
|
||||
"Run Time in User mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeKernel: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_kernelmode"),
|
||||
"Run time in Kernel mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
BytesReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_bytes_total"),
|
||||
"Bytes Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
BytesSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_bytes_total"),
|
||||
"Bytes Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_total"),
|
||||
"Packets Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_total"),
|
||||
"Packets Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsIncoming: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_dropped_total"),
|
||||
"Dropped Incoming Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsOutgoing: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_dropped_total"),
|
||||
"Dropped Outgoing Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// containerClose closes the container resource
|
||||
func containerClose(c hcsshim.Container) {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
|
||||
// Types Container is passed to get the containers compute systems only
|
||||
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
|
||||
if err != nil {
|
||||
log.Error("Err in Getting containers:", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
count := len(containers)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainersCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
)
|
||||
if count == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for _, containerDetails := range containers {
|
||||
containerId := containerDetails.ID
|
||||
|
||||
container, err := hcsshim.OpenContainer(containerId)
|
||||
if container != nil {
|
||||
defer containerClose(container)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("err in opening container: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
|
||||
cstats, err := container.Statistics()
|
||||
if err != nil {
|
||||
log.Error("err in fetching container Statistics: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
// HCS V1 is for docker runtime. Add the docker:// prefix on container_id
|
||||
containerId = "docker://" + containerId
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainerAvailable,
|
||||
prometheus.CounterValue,
|
||||
1,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitPeakBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitPeakBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsagePrivateWorkingSetBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsagePrivateWorkingSetBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.TotalRuntime100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeUser,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeUser100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeKernel,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeKernel100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
|
||||
if len(cstats.Network) == 0 {
|
||||
log.Info("No Network Stats for container: ", containerId)
|
||||
continue
|
||||
}
|
||||
|
||||
networkStats := cstats.Network
|
||||
|
||||
for _, networkInterface := range networkStats {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsIncoming,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsIncoming),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsOutgoing,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsOutgoing),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
382
collector/cpu.go
382
collector/cpu.go
@@ -5,27 +5,73 @@ package collector
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["cpu"] = NewCPUCollector
|
||||
Factories["cpu"] = newCPUCollector
|
||||
}
|
||||
|
||||
// A CPUCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfOS_Processor metrics
|
||||
type CPUCollector struct {
|
||||
type cpuCollectorBasic struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
}
|
||||
type cpuCollectorFull struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
ClockInterruptsTotal *prometheus.Desc
|
||||
IdleBreakEventsTotal *prometheus.Desc
|
||||
ParkingStatus *prometheus.Desc
|
||||
ProcessorFrequencyMHz *prometheus.Desc
|
||||
ProcessorMaxFrequencyMHz *prometheus.Desc
|
||||
ProcessorPerformance *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewCPUCollector constructs a new CPUCollector
|
||||
func NewCPUCollector() (Collector, error) {
|
||||
// newCPUCollector constructs a new cpuCollector, appropriate for the running OS
|
||||
func newCPUCollector() (Collector, error) {
|
||||
const subsystem = "cpu"
|
||||
return &CPUCollector{
|
||||
|
||||
version := getWindowsVersion()
|
||||
// For Windows 2008 (version 6.0) or earlier we only have the "Processor"
|
||||
// class. As of Windows 2008 R2 (version 6.1) the more detailed
|
||||
// "ProcessorInformation" set is available (although some of the counters
|
||||
// are added in later versions, so we aren't guaranteed to get all of
|
||||
// them).
|
||||
// Value 6.05 was selected to split between Windows versions.
|
||||
if version < 6.05 {
|
||||
return &cpuCollectorBasic{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
[]string{"core", "state"},
|
||||
nil,
|
||||
),
|
||||
TimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
|
||||
"Time that processor spent in different modes (idle, user, system, ...)",
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
DPCsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dpcs_total"),
|
||||
"Total number of received and serviced deferred procedure calls (DPCs)",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &cpuCollectorFull{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
@@ -38,7 +84,6 @@ func NewCPUCollector() (Collector, error) {
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
@@ -51,164 +96,273 @@ func NewCPUCollector() (Collector, error) {
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ClockInterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "clock_interrupts_total"),
|
||||
"Total number of received and serviced clock tick interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
IdleBreakEventsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "idle_break_events_total"),
|
||||
"Total number of time processor was woken from idle",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ParkingStatus: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "parking_status"),
|
||||
"Parking Status represents whether a processor is parked or not",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorFrequencyMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "core_frequency_mhz"),
|
||||
"Core frequency in megahertz",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorPerformance: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_performance"),
|
||||
"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CPUCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting cpu metrics:", desc, err)
|
||||
type perflibProcessor struct {
|
||||
Name string
|
||||
C1Transitions float64 `perflib:"C1 Transitions/sec"`
|
||||
C2Transitions float64 `perflib:"C2 Transitions/sec"`
|
||||
C3Transitions float64 `perflib:"C3 Transitions/sec"`
|
||||
DPCRate float64 `perflib:"DPC Rate"`
|
||||
DPCsQueued float64 `perflib:"DPCs Queued/sec"`
|
||||
Interrupts float64 `perflib:"Interrupts/sec"`
|
||||
PercentC2Time float64 `perflib:"% C1 Time"`
|
||||
PercentC3Time float64 `perflib:"% C2 Time"`
|
||||
PercentC1Time float64 `perflib:"% C3 Time"`
|
||||
PercentDPCTime float64 `perflib:"% DPC Time"`
|
||||
PercentIdleTime float64 `perflib:"% Idle Time"`
|
||||
PercentInterruptTime float64 `perflib:"% Interrupt Time"`
|
||||
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
|
||||
PercentProcessorTime float64 `perflib:"% Processor Time"`
|
||||
PercentUserTime float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorBasic) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessor, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfOS_Processor docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx
|
||||
type Win32_PerfRawData_PerfOS_Processor struct {
|
||||
Name string
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
DPCRate uint32
|
||||
DPCsQueuedPersec uint32
|
||||
InterruptsPersec uint32
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentUserTime uint64
|
||||
}
|
||||
|
||||
/* NOTE: This is an alternative class, but it is not as widely available. Decide which to use
|
||||
type Win32_PerfRawData_Counters_ProcessorInformation struct {
|
||||
Name string
|
||||
AverageIdleTime uint64
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
ClockInterruptsPersec uint64
|
||||
DPCRate uint64
|
||||
DPCsQueuedPersec uint64
|
||||
IdleBreakEventsPersec uint64
|
||||
InterruptsPersec uint64
|
||||
ParkingStatus uint64
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentofMaximumFrequency uint64
|
||||
PercentPerformanceLimit uint64
|
||||
PercentPriorityTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentPrivilegedUtility uint64
|
||||
PercentProcessorPerformance uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentProcessorUtility uint64
|
||||
PercentUserTime uint64
|
||||
PerformanceLimitFlags uint64
|
||||
ProcessorFrequency uint64
|
||||
ProcessorStateFlags uint64
|
||||
}*/
|
||||
|
||||
func (c *CPUCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfOS_Processor
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, data := range dst {
|
||||
if strings.Contains(data.Name, "_Total") {
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
|
||||
core := data.Name
|
||||
|
||||
// These are only available from Win32_PerfRawData_Counters_ProcessorInformation, which is only available from Win2008R2+
|
||||
/*ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequency,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.ProcessorFrequency),
|
||||
socket, core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MaximumFrequency,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentofMaximumFrequency)/100*float64(data.ProcessorFrequency),
|
||||
socket, core,
|
||||
)*/
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC1Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC1Time,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC2Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC2Time,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC3Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC3Time,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentIdleTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentIdleTime,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentInterruptTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentInterruptTime,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentDPCTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentDPCTime,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentPrivilegedTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentPrivilegedTime,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentUserTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentUserTime,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.InterruptsPersec),
|
||||
cpu.Interrupts,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.DPCsQueuedPersec),
|
||||
cpu.DPCsQueued,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibProcessorInformation struct {
|
||||
Name string
|
||||
C1TimeSeconds float64 `perflib:"% C1 Time"`
|
||||
C2TimeSeconds float64 `perflib:"% C2 Time"`
|
||||
C3TimeSeconds float64 `perflib:"% C3 Time"`
|
||||
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
|
||||
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
|
||||
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
|
||||
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
|
||||
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
|
||||
DPCTimeSeconds float64 `perflib:"% DPC Time"`
|
||||
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
|
||||
IdleTimeSeconds float64 `perflib:"% Idle Time"`
|
||||
InterruptsTotal float64 `perflib:"Interrupts/sec"`
|
||||
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
|
||||
ParkingStatus float64 `perflib:"Parking Status"`
|
||||
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
|
||||
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
|
||||
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
|
||||
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
|
||||
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
|
||||
ProcessorPerformance float64 `perflib:"% Processor Performance"`
|
||||
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
|
||||
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
|
||||
UserTimeSeconds float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessorInformation, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C1TimeSeconds,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C2TimeSeconds,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C3TimeSeconds,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleTimeSeconds,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptTimeSeconds,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCTimeSeconds,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PrivilegedTimeSeconds,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.UserTimeSeconds,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCsQueuedTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ClockInterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.ClockInterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IdleBreakEventsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleBreakEventsTotal,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ParkingStatus,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ParkingStatus,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequencyMHz,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorFrequencyMHz,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorPerformance,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorPerformance,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ func NewCSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting cs metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -181,7 +181,7 @@ func NewDNSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *DNSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting dns metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -597,7 +597,7 @@ func NewHyperVCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *HyperVCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *HyperVCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectVmHealth(ch); err != nil {
|
||||
log.Error("failed collecting hyperV health status metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -818,7 +818,7 @@ func NewIISCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *IISCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *IISCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting iis metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -29,17 +29,20 @@ var (
|
||||
|
||||
// A LogicalDiskCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfDisk_LogicalDisk metrics
|
||||
type LogicalDiskCollector struct {
|
||||
RequestsQueued *prometheus.Desc
|
||||
ReadBytesTotal *prometheus.Desc
|
||||
ReadsTotal *prometheus.Desc
|
||||
WriteBytesTotal *prometheus.Desc
|
||||
WritesTotal *prometheus.Desc
|
||||
ReadTime *prometheus.Desc
|
||||
WriteTime *prometheus.Desc
|
||||
TotalSpace *prometheus.Desc
|
||||
FreeSpace *prometheus.Desc
|
||||
IdleTime *prometheus.Desc
|
||||
SplitIOs *prometheus.Desc
|
||||
RequestsQueued *prometheus.Desc
|
||||
ReadBytesTotal *prometheus.Desc
|
||||
ReadsTotal *prometheus.Desc
|
||||
WriteBytesTotal *prometheus.Desc
|
||||
WritesTotal *prometheus.Desc
|
||||
ReadTime *prometheus.Desc
|
||||
WriteTime *prometheus.Desc
|
||||
TotalSpace *prometheus.Desc
|
||||
FreeSpace *prometheus.Desc
|
||||
IdleTime *prometheus.Desc
|
||||
SplitIOs *prometheus.Desc
|
||||
ReadLatency *prometheus.Desc
|
||||
WriteLatency *prometheus.Desc
|
||||
ReadWriteLatency *prometheus.Desc
|
||||
|
||||
volumeWhitelistPattern *regexp.Regexp
|
||||
volumeBlacklistPattern *regexp.Regexp
|
||||
@@ -127,6 +130,27 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
|
||||
ReadLatency: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "read_latency_seconds_total"),
|
||||
"Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
WriteLatency: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "write_latency_seconds_total"),
|
||||
"Shows the average time, in seconds, of a write operation to the disk (LogicalDisk.AvgDiskSecPerWrite)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ReadWriteLatency: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "read_write_latency_seconds_total"),
|
||||
"Shows the time, in seconds, of the average disk transfer (LogicalDisk.AvgDiskSecPerTransfer)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
volumeWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeWhitelist)),
|
||||
volumeBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeBlacklist)),
|
||||
}, nil
|
||||
@@ -134,7 +158,7 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogicalDiskCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting logical_disk metrics:", desc, err)
|
||||
return err
|
||||
@@ -158,6 +182,9 @@ type Win32_PerfRawData_PerfDisk_LogicalDisk struct {
|
||||
PercentFreeSpace_Base uint32
|
||||
PercentIdleTime uint64
|
||||
SplitIOPerSec uint32
|
||||
AvgDiskSecPerRead uint64
|
||||
AvgDiskSecPerWrite uint64
|
||||
AvgDiskSecPerTransfer uint64
|
||||
}
|
||||
|
||||
func (c *LogicalDiskCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
@@ -250,6 +277,27 @@ func (c *LogicalDiskCollector) collect(ch chan<- prometheus.Metric) (*prometheus
|
||||
float64(volume.SplitIOPerSec),
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadLatency,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.AvgDiskSecPerRead),
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteLatency,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.AvgDiskSecPerWrite),
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadWriteLatency,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.AvgDiskSecPerTransfer),
|
||||
volume.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
|
||||
199
collector/logon.go
Normal file
199
collector/logon.go
Normal file
@@ -0,0 +1,199 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["logon"] = NewLogonCollector
|
||||
}
|
||||
|
||||
// A LogonCollector is a Prometheus collector for WMI metrics
|
||||
type LogonCollector struct {
|
||||
LogonType *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewLogonCollector ...
|
||||
func NewLogonCollector() (Collector, error) {
|
||||
const subsystem = "logon"
|
||||
|
||||
return &LogonCollector{
|
||||
LogonType: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "logon_type"),
|
||||
"Number of active logon sessions (LogonSession.LogonType)",
|
||||
[]string{"status"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogonCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting user metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_LogonSession docs:
|
||||
// - https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-logonsession
|
||||
type Win32_LogonSession struct {
|
||||
LogonType uint32
|
||||
}
|
||||
|
||||
func (c *LogonCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_LogonSession
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
// Init counters
|
||||
system := 0
|
||||
interactive := 0
|
||||
network := 0
|
||||
batch := 0
|
||||
service := 0
|
||||
proxy := 0
|
||||
unlock := 0
|
||||
networkcleartext := 0
|
||||
newcredentials := 0
|
||||
remoteinteractive := 0
|
||||
cachedinteractive := 0
|
||||
cachedremoteinteractive := 0
|
||||
cachedunlock := 0
|
||||
|
||||
for _, entry := range dst {
|
||||
switch entry.LogonType {
|
||||
case 0:
|
||||
system++
|
||||
case 2:
|
||||
interactive++
|
||||
case 3:
|
||||
network++
|
||||
case 4:
|
||||
batch++
|
||||
case 5:
|
||||
service++
|
||||
case 6:
|
||||
proxy++
|
||||
case 7:
|
||||
unlock++
|
||||
case 8:
|
||||
networkcleartext++
|
||||
case 9:
|
||||
newcredentials++
|
||||
case 10:
|
||||
remoteinteractive++
|
||||
case 11:
|
||||
cachedinteractive++
|
||||
case 12:
|
||||
cachedremoteinteractive++
|
||||
case 13:
|
||||
cachedunlock++
|
||||
}
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(system),
|
||||
"system",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(interactive),
|
||||
"interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(network),
|
||||
"network",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(batch),
|
||||
"batch",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(service),
|
||||
"service",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(proxy),
|
||||
"proxy",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(unlock),
|
||||
"unlock",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(networkcleartext),
|
||||
"network_clear_text",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(newcredentials),
|
||||
"new_credentials",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(remoteinteractive),
|
||||
"remote_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedinteractive),
|
||||
"cached_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(remoteinteractive),
|
||||
"cached_remote_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedunlock),
|
||||
"cached_unlock",
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
@@ -256,7 +256,7 @@ func NewMemoryCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MemoryCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting memory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -33,7 +33,7 @@ type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
|
||||
func NewMSMQCollector() (Collector, error) {
|
||||
const subsystem = "msmq"
|
||||
|
||||
if *msmqWhereClause != "" {
|
||||
if *msmqWhereClause == "" {
|
||||
log.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func NewMSMQCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting msmq metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -91,7 +91,7 @@ func mssqlBuildWMIInstanceClass(suffix string, instance string) string {
|
||||
type mssqlCollectorsMap map[string]mssqlCollectorFunc
|
||||
|
||||
func mssqlAvailableClassCollectors() string {
|
||||
return "accessmethods,availreplica,bufman,databases,dbreplica,genstats,locks,memmgr,sqlstats"
|
||||
return "accessmethods,availreplica,bufman,databases,dbreplica,genstats,locks,memmgr,sqlstats,sqlerrors,transactions"
|
||||
}
|
||||
|
||||
func (c *MSSQLCollector) getMSSQLCollectors() mssqlCollectorsMap {
|
||||
@@ -105,6 +105,8 @@ func (c *MSSQLCollector) getMSSQLCollectors() mssqlCollectorsMap {
|
||||
mssqlCollectors["locks"] = c.collectLocks
|
||||
mssqlCollectors["memmgr"] = c.collectMemoryManager
|
||||
mssqlCollectors["sqlstats"] = c.collectSQLStats
|
||||
mssqlCollectors["sqlerrors"] = c.collectSQLErrors
|
||||
mssqlCollectors["transactions"] = c.collectTransactions
|
||||
|
||||
return mssqlCollectors
|
||||
}
|
||||
@@ -358,6 +360,24 @@ type MSSQLCollector struct {
|
||||
SQLStatsSQLReCompilations *prometheus.Desc
|
||||
SQLStatsUnsafeAutoParams *prometheus.Desc
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerSQLErrors
|
||||
SQLErrorsTotal *prometheus.Desc
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerTransactions
|
||||
TransactionsTempDbFreeSpaceBytes *prometheus.Desc
|
||||
TransactionsLongestTransactionRunningSeconds *prometheus.Desc
|
||||
TransactionsNonSnapshotVersionActiveTotal *prometheus.Desc
|
||||
TransactionsSnapshotActiveTotal *prometheus.Desc
|
||||
TransactionsActiveTotal *prometheus.Desc
|
||||
TransactionsUpdateConflictsTotal *prometheus.Desc
|
||||
TransactionsUpdateSnapshotActiveTotal *prometheus.Desc
|
||||
TransactionsVersionCleanupRateBytes *prometheus.Desc
|
||||
TransactionsVersionGenerationRateBytes *prometheus.Desc
|
||||
TransactionsVersionStoreSizeBytes *prometheus.Desc
|
||||
TransactionsVersionStoreUnits *prometheus.Desc
|
||||
TransactionsVersionStoreCreationUnits *prometheus.Desc
|
||||
TransactionsVersionStoreTruncationUnits *prometheus.Desc
|
||||
|
||||
mssqlInstances mssqlInstancesType
|
||||
mssqlCollectors mssqlCollectorsMap
|
||||
mssqlChildCollectorFailure int
|
||||
@@ -1637,6 +1657,94 @@ func NewMSSQLCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerSQLErrors
|
||||
SQLErrorsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "sql_errors_total"),
|
||||
"(SQLErrors.Total)",
|
||||
[]string{"instance", "resource"},
|
||||
nil,
|
||||
),
|
||||
|
||||
// Win32_PerfRawData_{instance}_SQLServerTransactions
|
||||
TransactionsTempDbFreeSpaceBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_tempdb_free_space_bytes"),
|
||||
"(Transactions.FreeSpaceInTempDbKB)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsLongestTransactionRunningSeconds: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_longest_transaction_running_seconds"),
|
||||
"(Transactions.LongestTransactionRunningTime)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsNonSnapshotVersionActiveTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_nonsnapshot_version_active_total"),
|
||||
"(Transactions.NonSnapshotVersionTransactions)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsSnapshotActiveTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_snapshot_active_total"),
|
||||
"(Transactions.SnapshotTransactions)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsActiveTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_active_total"),
|
||||
"(Transactions.Transactions)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsUpdateConflictsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_update_conflicts_total"),
|
||||
"(Transactions.UpdateConflictRatio)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsUpdateSnapshotActiveTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_update_snapshot_active_total"),
|
||||
"(Transactions.UpdateSnapshotTransactions)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionCleanupRateBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_cleanup_rate_bytes"),
|
||||
"(Transactions.VersionCleanupRateKBs)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionGenerationRateBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_generation_rate_bytes"),
|
||||
"(Transactions.VersionGenerationRateKBs)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionStoreSizeBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_store_size_bytes"),
|
||||
"(Transactions.VersionStoreSizeKB)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionStoreUnits: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_store_units"),
|
||||
"(Transactions.VersionStoreUnitCount)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionStoreCreationUnits: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_store_creation_units"),
|
||||
"(Transactions.VersionStoreUnitCreation)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
TransactionsVersionStoreTruncationUnits: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transactions_version_store_truncation_units"),
|
||||
"(Transactions.VersionStoreUnitTruncation)",
|
||||
[]string{"instance"},
|
||||
nil,
|
||||
),
|
||||
|
||||
mssqlInstances: getMSSQLInstances(),
|
||||
}
|
||||
|
||||
@@ -1687,7 +1795,7 @@ func (c *MSSQLCollector) execute(name string, fn mssqlCollectorFunc, ch chan<- p
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MSSQLCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *MSSQLCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
enabled := mssqlExpandEnabledCollectors(*mssqlEnabledCollectors)
|
||||
@@ -2575,7 +2683,7 @@ func (c *MSSQLCollector) collectDatabaseReplica(ch chan<- prometheus.Metric, sql
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DBReplicaTransactionDelay,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.TransactionDelay)*1000.0,
|
||||
float64(v.TransactionDelay)/1000.0,
|
||||
sqlInstance, replicaName,
|
||||
)
|
||||
}
|
||||
@@ -3558,3 +3666,162 @@ func (c *MSSQLCollector) collectSQLStats(ch chan<- prometheus.Metric, sqlInstanc
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type win32PerfRawDataSQLServerSQLErrors struct {
|
||||
Name string
|
||||
ErrorsPersec uint64
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerErrors docs:
|
||||
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object
|
||||
func (c *MSSQLCollector) collectSQLErrors(ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
    var dst []win32PerfRawDataSQLServerSQLErrors
    log.Debugf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance)

    class := mssqlBuildWMIInstanceClass("SQLErrors", sqlInstance)
    q := queryAllForClassWhere(&dst, class, `Name <> '_Total'`)
    if err := wmi.Query(q, &dst); err != nil {
        return nil, err
    }

    for _, v := range dst {
        resource := v.Name

        ch <- prometheus.MustNewConstMetric(
            c.SQLErrorsTotal,
            prometheus.CounterValue,
            float64(v.ErrorsPersec),
            sqlInstance, resource,
        )
    }

    return nil, nil
}

type win32PerfRawDataSqlServerTransactions struct {
    FreeSpaceintempdbKB            uint64
    LongestTransactionRunningTime  uint64
    NonSnapshotVersionTransactions uint64
    SnapshotTransactions           uint64
    Transactions                   uint64
    Updateconflictratio            uint64
    UpdateSnapshotTransactions     uint64
    VersionCleanuprateKBPers       uint64
    VersionGenerationrateKBPers    uint64
    VersionStoreSizeKB             uint64
    VersionStoreunitcount          uint64
    VersionStoreunitcreation       uint64
    VersionStoreunittruncation     uint64
}

// Win32_PerfRawData_MSSQLSERVER_Transactions docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
func (c *MSSQLCollector) collectTransactions(ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
    var dst []win32PerfRawDataSqlServerTransactions
    log.Debugf("mssql_transactions collector iterating sql instance %s.", sqlInstance)

    class := mssqlBuildWMIInstanceClass("Transactions", sqlInstance)
    q := queryAllForClass(&dst, class)
    if err := wmi.Query(q, &dst); err != nil {
        return nil, err
    }

    if len(dst) == 0 {
        return nil, errors.New("WMI query returned empty result set")
    }

    v := dst[0]

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsTempDbFreeSpaceBytes,
        prometheus.GaugeValue,
        float64(v.FreeSpaceintempdbKB*1024),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsLongestTransactionRunningSeconds,
        prometheus.GaugeValue,
        float64(v.LongestTransactionRunningTime),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsNonSnapshotVersionActiveTotal,
        prometheus.CounterValue,
        float64(v.NonSnapshotVersionTransactions),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsSnapshotActiveTotal,
        prometheus.CounterValue,
        float64(v.SnapshotTransactions),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsActiveTotal,
        prometheus.CounterValue,
        float64(v.Transactions),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsUpdateConflictsTotal,
        prometheus.CounterValue,
        float64(v.Updateconflictratio),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsUpdateSnapshotActiveTotal,
        prometheus.CounterValue,
        float64(v.UpdateSnapshotTransactions),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsVersionCleanupRateBytes,
        prometheus.GaugeValue,
        float64(v.VersionCleanuprateKBPers*1024),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsVersionGenerationRateBytes,
        prometheus.GaugeValue,
        float64(v.VersionGenerationrateKBPers*1024),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsVersionStoreSizeBytes,
        prometheus.GaugeValue,
        float64(v.VersionStoreSizeKB*1024),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsVersionStoreUnits,
        prometheus.CounterValue,
        float64(v.VersionStoreunitcount),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsVersionStoreCreationUnits,
        prometheus.CounterValue,
        float64(v.VersionStoreunitcreation),
        sqlInstance,
    )

    ch <- prometheus.MustNewConstMetric(
        c.TransactionsVersionStoreTruncationUnits,
        prometheus.CounterValue,
        float64(v.VersionStoreunittruncation),
        sqlInstance,
    )

    return nil, nil
}

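For context, a rough sketch of how these per-class helpers are presumably wired in: the MSSQL collector iterates the configured SQL instances and, for each class enabled via `--collectors.mssql.classes-enabled`, calls the matching `collect*` function. The function name `collectPerInstance` and the `enabled` map below are illustrative only, not taken from this diff.

```go
// Illustrative only: dispatching the new per-class collectors per SQL instance.
func (c *MSSQLCollector) collectPerInstance(ch chan<- prometheus.Metric, sqlInstance string, enabled map[string]bool) error {
    if enabled["sqlerrors"] {
        if _, err := c.collectSQLErrors(ch, sqlInstance); err != nil {
            return err
        }
    }
    if enabled["transactions"] {
        if _, err := c.collectTransactions(ch, sqlInstance); err != nil {
            return err
        }
    }
    return nil
}
```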
@@ -132,7 +132,7 @@ func NewNetworkCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NetworkCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting net metrics:", desc, err)
        return err

@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRExceptionsCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics:", desc, err)
        return err

@@ -46,7 +46,7 @@ func NewNETFramework_NETCLRInteropCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRInteropCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting win32_perfrawdata_netframework_netclrinterop metrics:", desc, err)
        return err

@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRJitCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRJitCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting win32_perfrawdata_netframework_netclrjit metrics:", desc, err)
        return err

@@ -88,7 +88,7 @@ func NewNETFramework_NETCLRLoadingCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRLoadingCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting win32_perfrawdata_netframework_netclrloading metrics:", desc, err)
        return err

@@ -74,7 +74,7 @@ func NewNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics:", desc, err)
        return err

@@ -112,7 +112,7 @@ func NewNETFramework_NETCLRMemoryCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRMemoryCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting win32_perfrawdata_netframework_netclrmemory metrics:", desc, err)
        return err

@@ -67,7 +67,7 @@ func NewNETFramework_NETCLRRemotingCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRRemotingCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting win32_perfrawdata_netframework_netclrremoting metrics:", desc, err)
        return err

@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRSecurityCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRSecurityCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics:", desc, err)
        return err

@@ -113,7 +113,7 @@ func NewOSCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting os metrics:", desc, err)
        return err

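The same two-line change — threading a `*ScrapeContext` through `Collect` — is repeated for every collector above. Presumably the `Collector` interface itself is updated to match; a minimal sketch of the new contract, inferred from the signatures in this diff (the exact definition of `ScrapeContext` is not shown here):

```go
// Collector is the interface a collector has to implement.
type Collector interface {
    // Get new metrics and expose them via prometheus registry.
    Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error
}
```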
100
collector/perflib.go
Normal file
@@ -0,0 +1,100 @@
package collector

import (
    "fmt"
    "reflect"

    perflibCollector "github.com/leoluk/perflib_exporter/collector"
    "github.com/leoluk/perflib_exporter/perflib"
    "github.com/prometheus/common/log"
)

func getPerflibSnapshot() (map[string]*perflib.PerfObject, error) {
    objects, err := perflib.QueryPerformanceData("Global")
    if err != nil {
        return nil, err
    }

    indexed := make(map[string]*perflib.PerfObject)
    for _, obj := range objects {
        indexed[obj.Name] = obj
    }
    return indexed, nil
}

func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
    if obj == nil {
        return fmt.Errorf("counter not found")
    }
    rv := reflect.ValueOf(vs)
    if rv.Kind() != reflect.Ptr || rv.IsNil() {
        return fmt.Errorf("%v is nil or not a pointer to slice", reflect.TypeOf(vs))
    }
    ev := rv.Elem()
    if ev.Kind() != reflect.Slice {
        return fmt.Errorf("%v is not slice", reflect.TypeOf(vs))
    }

    // Ensure sufficient length
    if ev.Cap() < len(obj.Instances) {
        nvs := reflect.MakeSlice(ev.Type(), len(obj.Instances), len(obj.Instances))
        ev.Set(nvs)
    }

    for idx, instance := range obj.Instances {
        target := ev.Index(idx)
        rt := target.Type()

        counters := make(map[string]*perflib.PerfCounter, len(instance.Counters))
        for _, ctr := range instance.Counters {
            if ctr.Def.IsBaseValue && !ctr.Def.IsNanosecondCounter {
                counters[ctr.Def.Name+"_Base"] = ctr
            } else {
                counters[ctr.Def.Name] = ctr
            }
        }

        for i := 0; i < target.NumField(); i++ {
            f := rt.Field(i)
            tag := f.Tag.Get("perflib")
            if tag == "" {
                continue
            }

            ctr, found := counters[tag]
            if !found {
                log.Debugf("missing counter %q, have %v", tag, counterMapKeys(counters))
                continue
            }
            if !target.Field(i).CanSet() {
                return fmt.Errorf("tagged field %v cannot be written to", f.Name)
            }
            if fieldType := target.Field(i).Type(); fieldType != reflect.TypeOf((*float64)(nil)).Elem() {
                return fmt.Errorf("tagged field %v has wrong type %v, must be float64", f.Name, fieldType)
            }

            switch ctr.Def.CounterType {
            case perflibCollector.PERF_ELAPSED_TIME:
                target.Field(i).SetFloat(float64(ctr.Value-windowsEpoch) / float64(obj.Frequency))
            case perflibCollector.PERF_100NSEC_TIMER, perflibCollector.PERF_PRECISION_100NS_TIMER:
                target.Field(i).SetFloat(float64(ctr.Value) * ticksToSecondsScaleFactor)
            default:
                target.Field(i).SetFloat(float64(ctr.Value))
            }
        }

        if instance.Name != "" && target.FieldByName("Name").CanSet() {
            target.FieldByName("Name").SetString(instance.Name)
        }
    }

    return nil
}

func counterMapKeys(m map[string]*perflib.PerfCounter) []string {
    keys := make([]string, 0, len(m))
    for k := range m {
        keys = append(keys, k)
    }
    return keys
}
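To show how `getPerflibSnapshot` and `unmarshalObject` fit together, here is a small, hypothetical usage sketch. The struct and counter names (`processorInfo`, `"% Processor Time"`, `"Processor Information"`) are illustrative only; tagged fields must be `float64`, and an exported `Name` field picks up the instance name, as enforced by the code above.

```go
// Hypothetical example of consuming a perflib snapshot (not part of this change).
type processorInfo struct {
    Name                 string
    PercentProcessorTime float64 `perflib:"% Processor Time"`
}

func exampleUsage() error {
    snapshot, err := getPerflibSnapshot()
    if err != nil {
        return err
    }
    var cpus []processorInfo
    // Look up the perflib object by name and unmarshal each instance
    // into one element of the tagged struct slice.
    if err := unmarshalObject(snapshot["Processor Information"], &cpus); err != nil {
        return err
    }
    for _, cpu := range cpus {
        fmt.Printf("core %s: %f\n", cpu.Name, cpu.PercentProcessorTime)
    }
    return nil
}
```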
125
collector/perflib_test.go
Normal file
@@ -0,0 +1,125 @@
package collector

import (
    "reflect"
    "testing"

    perflibCollector "github.com/leoluk/perflib_exporter/collector"
    "github.com/leoluk/perflib_exporter/perflib"
)

type simple struct {
    ValA float64 `perflib:"Something"`
    ValB float64 `perflib:"Something Else"`
}

func TestUnmarshalPerflib(t *testing.T) {
    cases := []struct {
        name string
        obj  *perflib.PerfObject

        expectedOutput []simple
        expectError    bool
    }{
        {
            name:           "nil check",
            obj:            nil,
            expectedOutput: []simple{},
            expectError:    true,
        },
        {
            name: "Simple",
            obj: &perflib.PerfObject{
                Instances: []*perflib.PerfInstance{
                    {
                        Counters: []*perflib.PerfCounter{
                            {
                                Def: &perflib.PerfCounterDef{
                                    Name:        "Something",
                                    CounterType: perflibCollector.PERF_COUNTER_COUNTER,
                                },
                                Value: 123,
                            },
                        },
                    },
                },
            },
            expectedOutput: []simple{{ValA: 123}},
            expectError:    false,
        },
        {
            name: "Multiple properties",
            obj: &perflib.PerfObject{
                Instances: []*perflib.PerfInstance{
                    {
                        Counters: []*perflib.PerfCounter{
                            {
                                Def: &perflib.PerfCounterDef{
                                    Name:        "Something",
                                    CounterType: perflibCollector.PERF_COUNTER_COUNTER,
                                },
                                Value: 123,
                            },
                            {
                                Def: &perflib.PerfCounterDef{
                                    Name:        "Something Else",
                                    CounterType: perflibCollector.PERF_COUNTER_COUNTER,
                                },
                                Value: 256,
                            },
                        },
                    },
                },
            },
            expectedOutput: []simple{{ValA: 123, ValB: 256}},
            expectError:    false,
        },
        {
            name: "Multiple instances",
            obj: &perflib.PerfObject{
                Instances: []*perflib.PerfInstance{
                    {
                        Counters: []*perflib.PerfCounter{
                            {
                                Def: &perflib.PerfCounterDef{
                                    Name:        "Something",
                                    CounterType: perflibCollector.PERF_COUNTER_COUNTER,
                                },
                                Value: 321,
                            },
                        },
                    },
                    {
                        Counters: []*perflib.PerfCounter{
                            {
                                Def: &perflib.PerfCounterDef{
                                    Name:        "Something",
                                    CounterType: perflibCollector.PERF_COUNTER_COUNTER,
                                },
                                Value: 231,
                            },
                        },
                    },
                },
            },
            expectedOutput: []simple{{ValA: 321}, {ValA: 231}},
            expectError:    false,
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            output := make([]simple, 0)
            err := unmarshalObject(c.obj, &output)
            if err != nil && !c.expectError {
                t.Errorf("Did not expect error, got %q", err)
            }
            if err == nil && c.expectError {
                t.Errorf("Expected an error, but got ok")
            }

            if err == nil && !reflect.DeepEqual(output, c.expectedOutput) {
                t.Errorf("Output mismatch, expected %+v, got %+v", c.expectedOutput, output)
            }
        })
    }
}

@@ -135,7 +135,7 @@ func NewProcessCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *ProcessCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *ProcessCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting process metrics:", desc, err)
        return err

@@ -64,7 +64,7 @@ func NewserviceCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *serviceCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting service metrics:", desc, err)
        return err

@@ -69,7 +69,7 @@ func NewSystemCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *SystemCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting system metrics:", desc, err)
        return err

@@ -90,7 +90,7 @@ func NewTCPCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *TCPCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *TCPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting tcp metrics:", desc, err)
        return err

@@ -212,7 +212,7 @@ func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
}

// Update implements the Collector interface.
func (c *textFileCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *textFileCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    error := 0.0
    mtimes := map[string]time.Time{}

103
collector/thermalzone.go
Normal file
@@ -0,0 +1,103 @@
package collector

import (
    "github.com/StackExchange/wmi"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/log"
)

func init() {
    Factories["thermalzone"] = NewThermalZoneCollector
}

// A thermalZoneCollector is a Prometheus collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics
type thermalZoneCollector struct {
    PercentPassiveLimit *prometheus.Desc
    Temperature         *prometheus.Desc
    ThrottleReasons     *prometheus.Desc
}

// NewThermalZoneCollector ...
func NewThermalZoneCollector() (Collector, error) {
    const subsystem = "thermalzone"
    return &thermalZoneCollector{
        Temperature: prometheus.NewDesc(
            prometheus.BuildFQName(Namespace, subsystem, "temperature_celsius"),
            "(Temperature)",
            []string{
                "name",
            },
            nil,
        ),
        PercentPassiveLimit: prometheus.NewDesc(
            prometheus.BuildFQName(Namespace, subsystem, "percent_passive_limit"),
            "(PercentPassiveLimit)",
            []string{
                "name",
            },
            nil,
        ),
        ThrottleReasons: prometheus.NewDesc(
            prometheus.BuildFQName(Namespace, subsystem, "throttle_reasons"),
            "(ThrottleReasons)",
            []string{
                "name",
            },
            nil,
        ),
    }, nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *thermalZoneCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting thermalzone metrics:", desc, err)
        return err
    }
    return nil
}

// Win32_PerfRawData_Counters_ThermalZoneInformation docs:
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_thermalzoneinformation/
type Win32_PerfRawData_Counters_ThermalZoneInformation struct {
    Name string

    HighPrecisionTemperature uint32
    PercentPassiveLimit      uint32
    ThrottleReasons          uint32
}

func (c *thermalZoneCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
    var dst []Win32_PerfRawData_Counters_ThermalZoneInformation
    q := queryAll(&dst)
    if err := wmi.Query(q, &dst); err != nil {
        return nil, err
    }

    for _, info := range dst {
        // Divide by 10 and subtract 273.15 to convert decikelvin to Celsius
        ch <- prometheus.MustNewConstMetric(
            c.Temperature,
            prometheus.GaugeValue,
            (float64(info.HighPrecisionTemperature)/10.0)-273.15,
            info.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.PercentPassiveLimit,
            prometheus.GaugeValue,
            float64(info.PercentPassiveLimit),
            info.Name,
        )

        ch <- prometheus.MustNewConstMetric(
            c.ThrottleReasons,
            prometheus.GaugeValue,
            float64(info.ThrottleReasons),
            info.Name,
        )
    }

    return nil, nil
}
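As a quick sanity check of the conversion above: a raw `HighPrecisionTemperature` of 3032 (tenths of a Kelvin) yields 3032/10 − 273.15 = 303.2 − 273.15 ≈ 30.05 °C, which is the value exported as `wmi_thermalzone_temperature_celsius`.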
@@ -162,7 +162,7 @@ func NewVmwareCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *VmwareCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *VmwareCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
    if desc, err := c.collectMem(ch); err != nil {
        log.Error("failed collecting vmware memory metrics:", desc, err)
        return err

@@ -4,27 +4,9 @@ import (
    "bytes"
    "reflect"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/log"
)

// ...
const (
    Namespace = "wmi"

    // Conversion factors
    ticksToSecondsScaleFactor = 1 / 1e7
)

// Factories ...
var Factories = make(map[string]func() (Collector, error))

// Collector is the interface a collector has to implement.
type Collector interface {
    // Get new metrics and expose them via prometheus registry.
    Collect(ch chan<- prometheus.Metric) (err error)
}

func className(src interface{}) string {
    s := reflect.Indirect(reflect.ValueOf(src))
    t := s.Type()

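As a worked example of the constant above: `ticksToSecondsScaleFactor = 1 / 1e7` converts Windows performance-counter ticks (100 ns units) to seconds, so a raw value of 50,000,000 ticks corresponds to 50,000,000 × 1/1e7 = 5 seconds. This is the factor applied to `PERF_100NSEC_TIMER` counters in `collector/perflib.go` above.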
@@ -9,6 +9,7 @@ This directory contains documentation of the collectors in the WMI exporter, with
- [`hyperv`](collector.hyperv.md)
- [`iis`](collector.iis.md)
- [`logical_disk`](collector.logical_disk.md)
- [`logon`](collector.logon.md)
- [`memory`](collector.memory.md)
- [`msmq`](collector.msmq.md)
- [`mssql`](collector.mssql.md)
@@ -27,4 +28,4 @@ This directory contains documentation of the collectors in the WMI exporter, with
- [`system`](collector.system.md)
- [`tcp`](collector.tcp.md)
- [`textfile`](collector.textfile.md)
- [`vmware`](collector.vmware.md)
- [`vmware`](collector.vmware.md)

42
docs/collector.container.md
Normal file
@@ -0,0 +1,42 @@
# container collector

The container collector exposes metrics about containers running on the system

|||
-|-
Metric name prefix | `container`
Enabled by default? | No

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_container_available` | Available | counter | `container_id`
`wmi_container_count` | Number of containers | gauge | `container_id`
`wmi_container_cpu_usage_seconds_kernelmode` | Run time in kernel mode in seconds | counter | `container_id`
`wmi_container_cpu_usage_seconds_usermode` | Run time in user mode in seconds | counter | `container_id`
`wmi_container_cpu_usage_seconds_total` | Total run time in seconds | counter | `container_id`
`wmi_container_memory_usage_commit_bytes` | Memory usage commit bytes | gauge | `container_id`
`wmi_container_memory_usage_commit_peak_bytes` | Memory usage commit peak bytes | gauge | `container_id`
`wmi_container_memory_usage_private_working_set_bytes` | Memory usage private working set bytes | gauge | `container_id`
`wmi_container_network_receive_bytes_total` | Bytes received on interface | counter | `container_id`, `interface`
`wmi_container_network_receive_packets_total` | Packets received on interface | counter | `container_id`, `interface`
`wmi_container_network_receive_packets_dropped_total` | Dropped incoming packets on interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_bytes_total` | Bytes sent on interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_packets_total` | Packets sent on interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_packets_dropped_total` | Dropped outgoing packets on interface | counter | `container_id`, `interface`

### Example metric
_wmi_container_network_receive_bytes_total{container_id="docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e",interface="822179E7-002C-4280-ABBA-28BCFE401826"} 9.3305343e+07_

This metric means that a total of _9.3305343e+07_ bytes have been received on interface _822179E7-002C-4280-ABBA-28BCFE401826_ for container _docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e_.

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

@@ -1,31 +1,60 @@
# cpu collector

The cpu collector exposes metrics about CPU usage

|||
-|-
Metric name prefix | `cpu`
Classes | [`Win32_PerfRawData_PerfOS_Processor`](https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx)
Enabled by default? | Yes

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_cpu_cstate_seconds_total` | Time spent in low-power idle states | counter | `core`, `state`
`wmi_cpu_time_total` | Time that processor spent in different modes (idle, user, system, ...) | counter | `core`, `mode`
`wmi_cpu_interrupts_total` | Total number of received and serviced hardware interrupts | counter | `core`
`wmi_cpu_dpcs_total` | Total number of received and serviced deferred procedure calls (DPCs) | counter | `core`

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
# cpu collector

The cpu collector exposes metrics about CPU usage

|||
-|-
Metric name prefix | `cpu`
Data source | Perflib
Counters | `ProcessorInformation` (Windows Server 2008R2 and later) `Processor` (older versions)
Enabled by default? | Yes

## Flags

None

## Metrics
These metrics are available on all versions of Windows:

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_cpu_cstate_seconds_total` | Time spent in low-power idle states | counter | `core`, `state`
`wmi_cpu_time_total` | Time that processor spent in different modes (idle, user, system, ...) | counter | `core`, `mode`
`wmi_cpu_interrupts_total` | Total number of received and serviced hardware interrupts | counter | `core`
`wmi_cpu_dpcs_total` | Total number of received and serviced deferred procedure calls (DPCs) | counter | `core`

These metrics are only exposed on Windows Server 2008R2 and later:

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_cpu_clock_interrupts_total` | Total number of received and serviced clock tick interrupts | counter | `core`
`wmi_cpu_idle_break_events_total` | Total number of times the processor was woken from idle | counter | `core`
`wmi_cpu_parking_status` | Parking Status represents whether a processor is parked or not | gauge | `core`
`wmi_cpu_core_frequency_mhz` | Core frequency in megahertz | gauge | `core`
`wmi_cpu_processor_performance` | Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100% | gauge | `core`

### Example metric
Show frequency of host CPU cores
```
wmi_cpu_core_frequency_mhz{instance="localhost"}
```

## Useful queries
Show CPU usage by mode.
```
sum by (mode) (irate(wmi_cpu_time_total{instance="localhost"}[5m]))
```

## Alerting examples
**prometheus.rules**
```
# Alert on hosts with more than 80% CPU usage over a 10 minute period
- alert: CpuUsage
  expr: 100 - (avg by (instance) (irate(wmi_cpu_time_total{mode="idle"}[2m])) * 100) > 80
  for: 10m
  labels:
    severity: warning
  annotations:
    summary: "CPU Usage (instance {{ $labels.instance }})"
    description: "CPU Usage is more than 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
```

@@ -1,44 +1,75 @@
# logical_disk collector

The logical_disk collector exposes metrics about logical disks (in contrast to physical disks)

|||
-|-
Metric name prefix | `logical_disk`
Classes | [`Win32_PerfRawData_PerfDisk_LogicalDisk`](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71))
Enabled by default? | Yes

## Flags

### `--collector.logical_disk.volume-whitelist`

If given, a disk needs to match the whitelist regexp in order for the corresponding disk metrics to be reported

### `--collector.logical_disk.volume-blacklist`

If given, a disk needs to *not* match the blacklist regexp in order for the corresponding disk metrics to be reported

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`requests_queued` | _Not yet documented_ | gauge | `volume`
`read_bytes_total` | _Not yet documented_ | counter | `volume`
`reads_total` | _Not yet documented_ | counter | `volume`
`write_bytes_total` | _Not yet documented_ | counter | `volume`
`writes_total` | _Not yet documented_ | counter | `volume`
`read_seconds_total` | _Not yet documented_ | counter | `volume`
`write_seconds_total` | _Not yet documented_ | counter | `volume`
`free_bytes` | _Not yet documented_ | gauge | `volume`
`size_bytes` | _Not yet documented_ | gauge | `volume`
`idle_seconds_total` | _Not yet documented_ | counter | `volume`
`split_ios_total` | _Not yet documented_ | counter | `volume`

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
# logical_disk collector

The logical_disk collector exposes metrics about logical disks (in contrast to physical disks)

|||
-|-
Metric name prefix | `logical_disk`
Classes | [`Win32_PerfRawData_PerfDisk_LogicalDisk`](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71))
Enabled by default? | Yes

## Flags

### `--collector.logical_disk.volume-whitelist`

If given, a disk needs to match the whitelist regexp in order for the corresponding disk metrics to be reported

### `--collector.logical_disk.volume-blacklist`

If given, a disk needs to *not* match the blacklist regexp in order for the corresponding disk metrics to be reported

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`requests_queued` | Number of requests outstanding on the disk at the time the performance data is collected | gauge | `volume`
`read_bytes_total` | Rate at which bytes are transferred from the disk during read operations | counter | `volume`
`reads_total` | Rate of read operations on the disk | counter | `volume`
`write_bytes_total` | Rate at which bytes are transferred to the disk during write operations | counter | `volume`
`writes_total` | Rate of write operations on the disk | counter | `volume`
`read_seconds_total` | Seconds the disk was busy servicing read requests | counter | `volume`
`write_seconds_total` | Seconds the disk was busy servicing write requests | counter | `volume`
`free_bytes` | Unused space of the disk in bytes | gauge | `volume`
`size_bytes` | Total size of the disk in bytes | gauge | `volume`
`idle_seconds_total` | Seconds the disk was idle (not servicing read/write requests) | counter | `volume`
`split_ios_total` | Number of I/Os to the disk split into multiple I/Os | counter | `volume`

### Example metric
Query the rate of bytes read from a disk
```
rate(wmi_logical_disk_read_bytes_total{instance="localhost", volume=~"C:"}[2m])
```

## Useful queries
Calculate rate of total IOPS for disk
```
rate(wmi_logical_disk_reads_total{instance="localhost", volume="C:"}[2m]) + rate(wmi_logical_disk_writes_total{instance="localhost", volume="C:"}[2m])
```

## Alerting examples
**prometheus.rules**
```
groups:
- name: Windows Disk Alerts
  rules:

  # Sends an alert when disk space usage is above 95%
  - alert: DiskSpaceUsage
    expr: 100.0 - 100 * (wmi_logical_disk_free_bytes / wmi_logical_disk_size_bytes) > 95
    for: 10m
    labels:
      severity: high
    annotations:
      summary: "Disk Space Usage (instance {{ $labels.instance }})"
      description: "Disk Space on Drive is used more than 95%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"

  # Alerts on disks with over 85% space usage predicted to fill within the next four days
  - alert: DiskFilling
    expr: 100 * (wmi_logical_disk_free_bytes / wmi_logical_disk_size_bytes) < 15 and predict_linear(wmi_logical_disk_free_bytes[6h], 4 * 24 * 3600) < 0
    for: 10m
    labels:
      severity: warning
    annotations:
      summary: "Disk full in four days (instance {{ $labels.instance }})"
      description: "{{ $labels.volume }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
```

34
docs/collector.logon.md
Normal file
@@ -0,0 +1,34 @@
# logon collector

The logon collector exposes metrics detailing the active user logon sessions.

|||
-|-
Metric name prefix | `logon`
Classes | [`Win32_LogonSession`](https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-logonsession)
Enabled by default? | No

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_logon_logon_type` | Number of active user logon sessions | gauge | `status`

### Example metric
Query the total number of interactive logon sessions
```
wmi_logon_logon_type{status="interactive"}
```

## Useful queries
Query the total number of local and remote (i.e. Terminal Services) interactive sessions.
```
wmi_logon_logon_type{status=~"interactive|remoteinteractive"}
```

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

@@ -2,6 +2,8 @@

The memory collector exposes metrics about system memory usage

|||
-|-
Metric name prefix | `memory`
Classes | `Win32_PerfRawData_PerfOS_Memory`
Enabled by default? | Yes
@@ -17,25 +19,25 @@ Name | Description | Type | Labels
`wmi_cs_logical_processors` | Number of installed logical processors | gauge | None
`wmi_cs_physical_memory_bytes` | Total installed physical memory | gauge | None
`wmi_memory_available_bytes` | The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to the standby (cached), free and zero page lists | gauge | None
`wmi_memory_cache_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_cache_bytes_peak` | _Not yet documented_ | gauge | None
`wmi_memory_cache_faults_total` | _Not yet documented_ | gauge | None
`wmi_memory_commit_limit` | _Not yet documented_ | gauge | None
`wmi_memory_committed_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_cache_bytes` | Number of bytes currently being used by the file system cache | gauge | None
`wmi_memory_cache_bytes_peak` | Maximum number of CacheBytes after the system was last restarted | gauge | None
`wmi_memory_cache_faults_total` | Number of faults which occur when a page sought in the file system cache is not found there and must be retrieved from elsewhere in memory (soft fault) or from disk (hard fault) | gauge | None
`wmi_memory_commit_limit` | Amount of virtual memory, in bytes, that can be committed without having to extend the paging file(s) | gauge | None
`wmi_memory_committed_bytes` | Amount of committed virtual memory, in bytes | gauge | None
`wmi_memory_demand_zero_faults_total` | The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space | gauge | None
`wmi_memory_free_and_zero_page_list_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_free_system_page_table_entries` | _Not yet documented_ | gauge | None
`wmi_memory_free_system_page_table_entries` | Number of page table entries not being used by the system | gauge | None
`wmi_memory_modified_page_list_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_page_faults_total` | _Not yet documented_ | gauge | None
`wmi_memory_page_faults_total` | Overall rate at which faulted pages are handled by the processor | gauge | None
`wmi_memory_swap_page_reads_total` | Number of disk page reads (a single read operation reading several pages is still only counted once) | gauge | None
`wmi_memory_swap_pages_read_total` | Number of pages read across all page reads (i.e. counting all pages read even if they are read in a single operation) | gauge | None
`wmi_memory_swap_pages_written_total` | Number of pages written across all page writes (i.e. counting all pages written even if they are written in a single operation) | gauge | None
`wmi_memory_swap_page_operations_total` | Total number of swap page reads and writes (PagesPersec) | gauge | None
`wmi_memory_swap_page_writes_total` | Number of disk page writes (a single write operation writing several pages is still only counted once) | gauge | None
`wmi_memory_pool_nonpaged_allocs_total` | The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory for objects that cannot be written to disk, and must remain in physical memory as long as they are allocated | gauge | None
`wmi_memory_pool_nonpaged_bytes_total` | _Not yet documented_ | gauge | None
`wmi_memory_pool_paged_allocs_total` | _Not yet documented_ | gauge | None
`wmi_memory_pool_paged_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_pool_nonpaged_bytes_total` | Number of bytes in the non-paged pool | gauge | None
`wmi_memory_pool_paged_allocs_total` | Number of calls to allocate space in the paged pool, regardless of the amount of space allocated in each call | gauge | None
`wmi_memory_pool_paged_bytes` | Number of bytes in the paged pool | gauge | None
`wmi_memory_pool_paged_resident_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_standby_cache_core_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_standby_cache_normal_priority_bytes` | _Not yet documented_ | gauge | None

@@ -5,14 +5,14 @@ The mssql collector exposes metrics about the MSSQL server
|||
-|-
Metric name prefix | `mssql`
Classes | [`Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerLocks`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object)
Classes | [`Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerLocks`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLErrors`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerTransactions`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object)
Enabled by default? | No

## Flags

### `--collectors.mssql.classes-enabled`

Comma-separated list of MSSQL WMI classes to use. Supported values are `accessmethods`, `availreplica`, `bufman`, `databases`, `dbreplica`, `genstats`, `locks`, `memmgr` and `sqlstats`.
Comma-separated list of MSSQL WMI classes to use. Supported values are `accessmethods`, `availreplica`, `bufman`, `databases`, `dbreplica`, `genstats`, `locks`, `memmgr`, `sqlstats`, `sqlerrors` and `transactions`.

### `--collectors.mssql.class-print`

@@ -230,6 +230,20 @@ Name | Description | Type | Labels
`wmi_mssql_sqlstats_sql_compilations` | _Not yet documented_ | counter | `instance`
`wmi_mssql_sqlstats_sql_recompilations` | _Not yet documented_ | counter | `instance`
`wmi_mssql_sqlstats_unsafe_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
`wmi_mssql_sql_errors_total` | _Not yet documented_ | counter | `instance`, `resource`
`wmi_mssql_transactions_tempdb_free_space_bytes` | _Not yet documented_ | gauge | `instance`
`wmi_mssql_transactions_longest_transaction_running_seconds` | _Not yet documented_ | gauge | `instance`
`wmi_mssql_transactions_nonsnapshot_version_active_total` | _Not yet documented_ | counter | `instance`
`wmi_mssql_transactions_snapshot_active_total` | _Not yet documented_ | counter | `instance`
`wmi_mssql_transactions_active_total` | _Not yet documented_ | counter | `instance`
`wmi_mssql_transactions_update_conflicts_total` | _Not yet documented_ | counter | `instance`
`wmi_mssql_transactions_update_snapshot_active_total` | _Not yet documented_ | counter | `instance`
`wmi_mssql_transactions_version_cleanup_rate_bytes` | _Not yet documented_ | gauge | `instance`
`wmi_mssql_transactions_version_generation_rate_bytes` | _Not yet documented_ | gauge | `instance`
`wmi_mssql_transactions_version_store_size_bytes` | _Not yet documented_ | gauge | `instance`
`wmi_mssql_transactions_version_store_units` | _Not yet documented_ | counter | `instance`
`wmi_mssql_transactions_version_store_creation_units` | _Not yet documented_ | counter | `instance`
`wmi_mssql_transactions_version_store_truncation_units` | _Not yet documented_ | counter | `instance`

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

@@ -22,24 +22,40 @@ If given, an interface name needs to *not* match the blacklist regexp in order for

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_net_bytes_received_total` | _Not yet documented_ | counter | `nic`
`wmi_net_bytes_sent_total` | _Not yet documented_ | counter | `nic`
`wmi_net_bytes_total` | _Not yet documented_ | counter | `nic`
`wmi_net_packets_outbound_discarded` | _Not yet documented_ | counter | `nic`
`wmi_net_packets_outbound_errors` | _Not yet documented_ | counter | `nic`
`wmi_net_packets_received_discarded` | _Not yet documented_ | counter | `nic`
`wmi_net_packets_received_errors` | _Not yet documented_ | counter | `nic`
`wmi_net_packets_received_total` | _Not yet documented_ | counter | `nic`
`wmi_net_packets_received_unknown` | _Not yet documented_ | counter | `nic`
`wmi_net_packets_total` | _Not yet documented_ | counter | `nic`
`wmi_net_packets_sent_total` | _Not yet documented_ | counter | `nic`
`wmi_net_current_bandwidth` | _Not yet documented_ | counter | `nic`
`wmi_net_bytes_received_total` | Total bytes received by interface | counter | `nic`
`wmi_net_bytes_sent_total` | Total bytes transmitted by interface | counter | `nic`
`wmi_net_bytes_total` | Total bytes received and transmitted by interface | counter | `nic`
`wmi_net_packets_outbound_discarded` | Total outbound packets that were chosen to be discarded even though no errors had been detected to prevent transmission | counter | `nic`
`wmi_net_packets_outbound_errors` | Total packets that could not be transmitted due to errors | counter | `nic`
`wmi_net_packets_received_discarded` | Total inbound packets that were chosen to be discarded even though no errors had been detected to prevent delivery | counter | `nic`
`wmi_net_packets_received_errors` | Total packets that could not be received due to errors | counter | `nic`
`wmi_net_packets_received_total` | Total packets received by interface | counter | `nic`
`wmi_net_packets_received_unknown` | Total packets received by interface that were discarded because of an unknown or unsupported protocol | counter | `nic`
`wmi_net_packets_total` | Total packets received and transmitted by interface | counter | `nic`
`wmi_net_packets_sent_total` | Total packets transmitted by interface | counter | `nic`
`wmi_net_current_bandwidth` | Estimate of the interface's current bandwidth in bits per second (bps) | counter | `nic`

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
Query the rate of transmitted network traffic
```
rate(wmi_net_bytes_sent_total{instance="localhost"}[2m])
```

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
Get total utilisation of network interface as a percentage
```
rate(wmi_net_bytes_total{instance="localhost", nic="Microsoft_Hyper_V_Network_Adapter__1"}[2m]) * 8 / wmi_net_current_bandwidth{instance="localhost", nic="Microsoft_Hyper_V_Network_Adapter__1"} * 100
```

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
**prometheus.rules**
```
- alert: NetInterfaceUsage
  expr: rate(wmi_net_bytes_total[2m]) * 8 / wmi_net_current_bandwidth * 100 > 90
  for: 10m
  labels:
    severity: high
  annotations:
    summary: "Network Interface Usage (instance {{ $labels.instance }})"
    description: "Network traffic usage is greater than 90% for interface {{ $labels.nic }}\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
```

@@ -66,10 +66,42 @@ A service can have any of the following statuses:
Note that there is some overlap with service state.

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
Lists the services that have a 'disabled' start mode.
```
wmi_service_start_mode{exported_name=~"(mssqlserver|sqlserveragent)",start_mode="disabled"}
```

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
Counts the number of Microsoft SQL Server/Agent Processes
```
count(wmi_service_state{exported_name=~"(sqlserveragent|mssqlserver)",state="running"})
```

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
**prometheus.rules**
```
groups:
- name: Microsoft SQL Server Alerts
  rules:

  # Sends an alert when the 'sqlserveragent' service is not in the running state for 3 minutes.
  - alert: SQL Server Agent DOWN
    expr: wmi_service_state{instance="SQL",exported_name="sqlserveragent",state="running"} == 0
    for: 3m
    labels:
      severity: high
    annotations:
      summary: "Service {{ $labels.exported_name }} down"
      description: "Service {{ $labels.exported_name }} on instance {{ $labels.instance }} has been down for more than 3 minutes."

  # Sends an alert when the 'mssqlserver' service is not in the running state for 3 minutes.
  - alert: SQL Server DOWN
    expr: wmi_service_state{instance="SQL",exported_name="mssqlserver",state="running"} == 0
    for: 3m
    labels:
      severity: high
    annotations:
      summary: "Service {{ $labels.exported_name }} down"
      description: "Service {{ $labels.exported_name }} on instance {{ $labels.instance }} has been down for more than 3 minutes."
```
In this example, `instance` is the target label of the host. So each alert will be processed per host, which is then used in the alert description.

@@ -16,18 +16,24 @@ None

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_system_context_switches_total` | _Not yet documented_ | counter | None
`wmi_system_exception_dispatches_total` | _Not yet documented_ | counter | None
`wmi_system_processor_queue_length` | _Not yet documented_ | gauge | None
`wmi_system_system_calls_total` | _Not yet documented_ | counter | None
`wmi_system_system_up_time` | _Not yet documented_ | gauge | None
`wmi_system_threads` | _Not yet documented_ | gauge | None
`wmi_system_context_switches_total` | Total number of [context switches](https://en.wikipedia.org/wiki/Context_switch) | counter | None
`wmi_system_exception_dispatches_total` | Total exceptions dispatched by the system | counter | None
`wmi_system_processor_queue_length` | Number of threads in the processor queue. There is a single queue for processor time even on computers with multiple processors. | gauge | None
`wmi_system_system_calls_total` | Total combined calls to Windows NT system service routines by all processes running on the computer | counter | None
`wmi_system_system_up_time` | Time of last boot of system | gauge | None
`wmi_system_threads` | Number of Windows system [threads](https://en.wikipedia.org/wiki/Thread_(computing)) | gauge | None

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
Show current number of system threads
```
wmi_system_threads{instance="localhost"}
```

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
Find hosts that have rebooted in the last 24 hours
```
time() - wmi_system_system_up_time < 86400
```

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

32
docs/collector.thermalzone.md
Normal file
@@ -0,0 +1,32 @@
# thermalzone collector

The thermalzone collector exposes metrics about system temperatures. The underlying WMI class reports temperature in tenths of a Kelvin; the collector converts it to degrees Celsius.

|||
-|-
Metric name prefix | `thermalzone`
Classes | [`Win32_PerfRawData_Counters_ThermalZoneInformation`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_thermalzoneinformation/#temperature_properties)
Enabled by default? | No

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_thermalzone_percent_passive_limit` | % Passive Limit is the current limit this thermal zone is placing on the devices it controls. A limit of 100% indicates the devices are unconstrained. A limit of 0% indicates the devices are fully constrained. | gauge | `name`
`wmi_thermalzone_temperature_celsius` | Temperature of the thermal zone, in degrees Celsius. | gauge | `name`
`wmi_thermalzone_throttle_reasons` | Throttle Reasons indicate reasons why the thermal zone is limiting performance of the devices it controls. 0x0 – The zone is not throttled. 0x1 – The zone is throttled for thermal reasons. 0x2 – The zone is throttled to limit electrical current. | gauge | `name`

[`Throttle reasons` source](https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/examples--requirements-and-diagnostics)

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

218
exporter.go
218
exporter.go
@@ -5,7 +5,10 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -23,7 +26,8 @@ import (
|
||||
|
||||
// WmiCollector implements the prometheus.Collector interface.
|
||||
type WmiCollector struct {
|
||||
collectors map[string]collector.Collector
|
||||
maxScrapeDuration time.Duration
|
||||
collectors map[string]collector.Collector
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -45,6 +49,18 @@ var (
		[]string{"collector"},
		nil,
	)
	scrapeTimeoutDesc = prometheus.NewDesc(
		prometheus.BuildFQName(collector.Namespace, "exporter", "collector_timeout"),
		"wmi_exporter: Whether the collector timed out.",
		[]string{"collector"},
		nil,
	)
	snapshotDuration = prometheus.NewDesc(
		prometheus.BuildFQName(collector.Namespace, "exporter", "perflib_snapshot_duration_seconds"),
		"Duration of perflib snapshot capture",
		nil,
		nil,
	)

	// This can be removed when client_golang exposes this on Windows
	// (See https://github.com/prometheus/client_golang/issues/376)
@@ -64,25 +80,113 @@ func (coll WmiCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- scrapeSuccessDesc
}

// Collect sends the collected metrics from each of the collectors to
// prometheus. Collect could be called several times concurrently
// and thus its run is protected by a single mutex.
func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {
	wg := sync.WaitGroup{}
	wg.Add(len(coll.collectors))
	for name, c := range coll.collectors {
		go func(name string, c collector.Collector) {
			execute(name, c, ch)
			wg.Done()
		}(name, c)
	}
type collectorOutcome int

const (
	pending collectorOutcome = iota
	success
	failed
)

// Collect sends the collected metrics from each of the collectors to
// prometheus.
func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		startTimeDesc,
		prometheus.CounterValue,
		startTime,
	)
	wg.Wait()

	t := time.Now()
	scrapeContext, err := collector.PrepareScrapeContext()
	ch <- prometheus.MustNewConstMetric(
		snapshotDuration,
		prometheus.GaugeValue,
		time.Since(t).Seconds(),
	)
	if err != nil {
		ch <- prometheus.NewInvalidMetric(scrapeSuccessDesc, fmt.Errorf("failed to prepare scrape: %v", err))
		return
	}

	wg := sync.WaitGroup{}
	wg.Add(len(coll.collectors))
	collectorOutcomes := make(map[string]collectorOutcome)
	for name := range coll.collectors {
		collectorOutcomes[name] = pending
	}

	metricsBuffer := make(chan prometheus.Metric)
	l := sync.Mutex{}
	finished := false
	go func() {
		for m := range metricsBuffer {
			l.Lock()
			if !finished {
				ch <- m
			}
			l.Unlock()
		}
	}()

	for name, c := range coll.collectors {
		go func(name string, c collector.Collector) {
			defer wg.Done()
			outcome := execute(name, c, scrapeContext, metricsBuffer)
			l.Lock()
			if !finished {
				collectorOutcomes[name] = outcome
			}
			l.Unlock()
		}(name, c)
	}

	allDone := make(chan struct{})
	go func() {
		wg.Wait()
		close(allDone)
		close(metricsBuffer)
	}()

	// Wait until either all collectors finish, or timeout expires
	select {
	case <-allDone:
	case <-time.After(coll.maxScrapeDuration):
	}

	l.Lock()
	finished = true

	remainingCollectorNames := make([]string, 0)
	for name, outcome := range collectorOutcomes {
		var successValue, timeoutValue float64
		if outcome == pending {
			timeoutValue = 1.0
			remainingCollectorNames = append(remainingCollectorNames, name)
		}
		if outcome == success {
			successValue = 1.0
		}

		ch <- prometheus.MustNewConstMetric(
			scrapeSuccessDesc,
			prometheus.GaugeValue,
			successValue,
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			scrapeTimeoutDesc,
			prometheus.GaugeValue,
			timeoutValue,
			name,
		)
	}

	if len(remainingCollectorNames) > 0 {
		log.Warn("Collection timed out, still waiting for ", remainingCollectorNames)
	}

	l.Unlock()
}

func filterAvailableCollectors(collectors string) string {
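The hunk above replaces the unbounded fan-out with a timeout-bounded one: collectors write into an intermediate channel, a forwarding goroutine copies metrics onward only while a `finished` flag (guarded by a mutex) is still false, and the scrape returns once either all collectors finish or `maxScrapeDuration` elapses. Below is a minimal, self-contained sketch of that pattern; the `collectOnce` helper, the `workers` map and the hard-coded timeout are illustrative stand-ins, not part of the exporter.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// collectOnce fans results from several workers into out, but stops
// forwarding once the deadline passes, mirroring the exporter's
// timeout-bounded Collect.
func collectOnce(workers map[string]func() string, timeout time.Duration, out chan<- string) (timedOut []string) {
	buffer := make(chan string)
	var mu sync.Mutex
	finished := false
	done := make(map[string]bool)

	// Forward buffered results only while the scrape is still open.
	go func() {
		for v := range buffer {
			mu.Lock()
			if !finished {
				out <- v
			}
			mu.Unlock()
		}
	}()

	var wg sync.WaitGroup
	wg.Add(len(workers))
	for name, w := range workers {
		go func(name string, w func() string) {
			defer wg.Done()
			buffer <- w()
			mu.Lock()
			if !finished {
				done[name] = true
			}
			mu.Unlock()
		}(name, w)
	}

	allDone := make(chan struct{})
	go func() {
		wg.Wait()
		close(allDone)
		close(buffer)
	}()

	// Wait until either all workers finish or the timeout expires.
	select {
	case <-allDone:
	case <-time.After(timeout):
	}

	mu.Lock()
	finished = true
	for name := range workers {
		if !done[name] {
			timedOut = append(timedOut, name)
		}
	}
	mu.Unlock()
	return timedOut
}

func main() {
	out := make(chan string, 8)
	workers := map[string]func() string{
		"fast": func() string { return "fast result" },
		"slow": func() string { time.Sleep(2 * time.Second); return "slow result" },
	}
	late := collectOnce(workers, 100*time.Millisecond, out)
	close(out) // safe: forwarding has stopped once collectOnce returns
	for v := range out {
		fmt.Println(v)
	}
	fmt.Println("timed out:", late)
}
```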
@@ -96,31 +200,23 @@ func filterAvailableCollectors(collectors string) string {
	return strings.Join(availableCollectors, ",")
}

func execute(name string, c collector.Collector, ch chan<- prometheus.Metric) {
	begin := time.Now()
	err := c.Collect(ch)
	duration := time.Since(begin)
	var success float64

	if err != nil {
		log.Errorf("collector %s failed after %fs: %s", name, duration.Seconds(), err)
		success = 0
	} else {
		log.Debugf("collector %s succeeded after %fs.", name, duration.Seconds())
		success = 1
	}
func execute(name string, c collector.Collector, ctx *collector.ScrapeContext, ch chan<- prometheus.Metric) collectorOutcome {
	t := time.Now()
	err := c.Collect(ctx, ch)
	duration := time.Since(t).Seconds()
	ch <- prometheus.MustNewConstMetric(
		scrapeDurationDesc,
		prometheus.GaugeValue,
		duration.Seconds(),
		name,
	)
	ch <- prometheus.MustNewConstMetric(
		scrapeSuccessDesc,
		prometheus.GaugeValue,
		success,
		duration,
		name,
	)

	if err != nil {
		log.Errorf("collector %s failed after %fs: %s", name, duration, err)
		return failed
	}
	log.Debugf("collector %s succeeded after %fs.", name, duration)
	return success
}

func expandEnabledCollectors(enabled string) []string {
@@ -157,10 +253,6 @@ func loadCollectors(list string) (map[string]collector.Collector, error) {
	return collectors, nil
}

func init() {
	prometheus.MustRegister(version.NewCollector("wmi_exporter"))
}

func initWbem() {
	// This initialization prevents a memory leak on WMF 5+. See
	// https://github.com/martinlindhe/wmi_exporter/issues/77 and linked issues
@@ -192,6 +284,10 @@ func main() {
			"collectors.print",
			"If true, print available collectors and exit.",
		).Bool()
		timeoutMargin = kingpin.Flag(
			"scrape.timeout-margin",
			"Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads.",
		).Default("0.5").Float64()
	)

	log.AddFlags(kingpin.CommandLine)
@@ -236,10 +332,17 @@ func main() {

	log.Infof("Enabled collectors: %v", strings.Join(keys(collectors), ", "))

	nodeCollector := WmiCollector{collectors: collectors}
	prometheus.MustRegister(nodeCollector)
	h := &metricsHandler{
		timeoutMargin: *timeoutMargin,
		collectorFactory: func(timeout time.Duration) *WmiCollector {
			return &WmiCollector{
				collectors:        collectors,
				maxScrapeDuration: timeout,
			}
		},
	}

	http.Handle(*metricsPath, promhttp.Handler())
	http.Handle(*metricsPath, h)
	http.HandleFunc("/health", healthCheck)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, *metricsPath, http.StatusMovedPermanently)
@@ -303,3 +406,36 @@ loop:
	changes <- svc.Status{State: svc.StopPending}
	return
}

type metricsHandler struct {
	timeoutMargin    float64
	collectorFactory func(timeout time.Duration) *WmiCollector
}

func (mh *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	const defaultTimeout = 10.0

	var timeoutSeconds float64
	if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
		var err error
		timeoutSeconds, err = strconv.ParseFloat(v, 64)
		if err != nil {
			log.Warnf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f", v, defaultTimeout)
		}
	}
	if timeoutSeconds == 0 {
		timeoutSeconds = defaultTimeout
	}
	timeoutSeconds = timeoutSeconds - mh.timeoutMargin

	reg := prometheus.NewRegistry()
	reg.MustRegister(mh.collectorFactory(time.Duration(timeoutSeconds * float64(time.Second))))
	reg.MustRegister(
		prometheus.NewProcessCollector(os.Getpid(), ""),
		prometheus.NewGoCollector(),
		version.NewCollector("wmi_exporter"),
	)

	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
	h.ServeHTTP(w, r)
}
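The handler above derives the per-scrape deadline from the `X-Prometheus-Scrape-Timeout-Seconds` header minus the configured `--scrape.timeout-margin`. A small sketch of that arithmetic follows; the helper name `effectiveTimeout` is illustrative, not part of the exporter.

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// effectiveTimeout mirrors the handler's logic: parse the header value,
// fall back to a default when it is absent or malformed, then subtract
// the configured margin so the exporter finishes before Prometheus
// gives up on the scrape.
func effectiveTimeout(headerValue string, defaultSeconds, margin float64) time.Duration {
	seconds, err := strconv.ParseFloat(headerValue, 64)
	if err != nil || seconds == 0 {
		seconds = defaultSeconds
	}
	return time.Duration((seconds - margin) * float64(time.Second))
}

func main() {
	// Prometheus announces a 15s scrape timeout; with the default 0.5s
	// margin the collectors get 14.5s before partial results are returned.
	fmt.Println(effectiveTimeout("15", 10.0, 0.5)) // 14.5s
	fmt.Println(effectiveTimeout("", 10.0, 0.5))   // header missing -> 9.5s
}
```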
@@ -20,7 +20,7 @@ else {
	$members = $wmiObject `
		| Get-Member -MemberType Properties `
		| Where-Object { $_.Definition -Match '^u?int' -and $_.Name -NotMatch '_' } `
		| Select-Object Name, @{Name="Type";Expression={$_.Definition.Split(" ")[0]}}
		| Select-Object Name, @{Name="Type";Expression={$_.Definition.Split(" ")[0]}})
	$input = @{
		"Class"=$Class;
		"CollectorName"=$CollectorName;
@@ -29,7 +29,7 @@ func New{{ .CollectorName }}Collector() (Collector, error) {
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *{{ .CollectorName }}Collector) Collect(ch chan<- prometheus.Metric) error {
func (c *{{ .CollectorName }}Collector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Error("failed collecting {{ .CollectorName | toLower }} metrics:", desc, err)
		return err
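The template change above reflects the new collector contract: `Collect` now receives a `*ScrapeContext` prepared once per scrape, in addition to the metric channel. A self-contained sketch of that shape follows; the `ScrapeContext` struct, the `uptimeCollector` type and its metric are simplified stand-ins rather than the exporter's real types.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// ScrapeContext stands in for the per-scrape state (e.g. a perflib
// snapshot) that is captured once and shared by all collectors.
type ScrapeContext struct {
	SnapshotTakenAt int64
}

// Collector mirrors the new interface: every collector gets the shared
// scrape context plus the channel to emit metrics on.
type Collector interface {
	Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error
}

type uptimeCollector struct {
	desc *prometheus.Desc
}

func (c *uptimeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	// A real collector would read from the snapshot in ctx; here we just
	// emit the snapshot timestamp itself.
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(ctx.SnapshotTakenAt))
	return nil
}

func main() {
	c := &uptimeCollector{
		desc: prometheus.NewDesc("example_snapshot_timestamp_seconds", "Illustrative metric.", nil, nil),
	}
	ch := make(chan prometheus.Metric, 1)
	if err := c.Collect(&ScrapeContext{SnapshotTakenAt: 1}, ch); err != nil {
		panic(err)
	}
	fmt.Println(<-ch)
}
```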
1
vendor/github.com/Microsoft/go-winio/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
*.exe
22
vendor/github.com/Microsoft/go-winio/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
22
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
Normal file
@@ -0,0 +1,22 @@
# go-winio

This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
for using named pipes as a net transport.

This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
newer operating systems. This is similar to the implementation of network sockets in Go's net
package.

Please see the LICENSE file for licensing information.

This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.

Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
27
vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
344
vendor/github.com/Microsoft/go-winio/archive/tar/common.go
generated
vendored
Normal file
@@ -0,0 +1,344 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tar implements access to tar archives.
|
||||
// It aims to cover most of the variations, including those produced
|
||||
// by GNU and BSD tars.
|
||||
//
|
||||
// References:
|
||||
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
|
||||
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
|
||||
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
blockSize = 512
|
||||
|
||||
// Types
|
||||
TypeReg = '0' // regular file
|
||||
TypeRegA = '\x00' // regular file
|
||||
TypeLink = '1' // hard link
|
||||
TypeSymlink = '2' // symbolic link
|
||||
TypeChar = '3' // character device node
|
||||
TypeBlock = '4' // block device node
|
||||
TypeDir = '5' // directory
|
||||
TypeFifo = '6' // fifo node
|
||||
TypeCont = '7' // reserved
|
||||
TypeXHeader = 'x' // extended header
|
||||
TypeXGlobalHeader = 'g' // global extended header
|
||||
TypeGNULongName = 'L' // Next file has a long name
|
||||
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
|
||||
TypeGNUSparse = 'S' // sparse file
|
||||
)
|
||||
|
||||
// A Header represents a single header in a tar archive.
|
||||
// Some fields may not be populated.
|
||||
type Header struct {
|
||||
Name string // name of header file entry
|
||||
Mode int64 // permission and mode bits
|
||||
Uid int // user id of owner
|
||||
Gid int // group id of owner
|
||||
Size int64 // length in bytes
|
||||
ModTime time.Time // modified time
|
||||
Typeflag byte // type of header entry
|
||||
Linkname string // target name of link
|
||||
Uname string // user name of owner
|
||||
Gname string // group name of owner
|
||||
Devmajor int64 // major number of character or block device
|
||||
Devminor int64 // minor number of character or block device
|
||||
AccessTime time.Time // access time
|
||||
ChangeTime time.Time // status change time
|
||||
CreationTime time.Time // creation time
|
||||
Xattrs map[string]string
|
||||
Winheaders map[string]string
|
||||
}
|
||||
|
||||
// File name constants from the tar spec.
|
||||
const (
|
||||
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
|
||||
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
|
||||
)
|
||||
|
||||
// FileInfo returns an os.FileInfo for the Header.
|
||||
func (h *Header) FileInfo() os.FileInfo {
|
||||
return headerFileInfo{h}
|
||||
}
|
||||
|
||||
// headerFileInfo implements os.FileInfo.
|
||||
type headerFileInfo struct {
|
||||
h *Header
|
||||
}
|
||||
|
||||
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
|
||||
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
|
||||
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
|
||||
func (fi headerFileInfo) Sys() interface{} { return fi.h }
|
||||
|
||||
// Name returns the base name of the file.
|
||||
func (fi headerFileInfo) Name() string {
|
||||
if fi.IsDir() {
|
||||
return path.Base(path.Clean(fi.h.Name))
|
||||
}
|
||||
return path.Base(fi.h.Name)
|
||||
}
|
||||
|
||||
// Mode returns the permission and mode bits for the headerFileInfo.
|
||||
func (fi headerFileInfo) Mode() (mode os.FileMode) {
|
||||
// Set file permission bits.
|
||||
mode = os.FileMode(fi.h.Mode).Perm()
|
||||
|
||||
// Set setuid, setgid and sticky bits.
|
||||
if fi.h.Mode&c_ISUID != 0 {
|
||||
// setuid
|
||||
mode |= os.ModeSetuid
|
||||
}
|
||||
if fi.h.Mode&c_ISGID != 0 {
|
||||
// setgid
|
||||
mode |= os.ModeSetgid
|
||||
}
|
||||
if fi.h.Mode&c_ISVTX != 0 {
|
||||
// sticky
|
||||
mode |= os.ModeSticky
|
||||
}
|
||||
|
||||
// Set file mode bits.
|
||||
// clear perm, setuid, setgid and sticky bits.
|
||||
m := os.FileMode(fi.h.Mode) &^ 07777
|
||||
if m == c_ISDIR {
|
||||
// directory
|
||||
mode |= os.ModeDir
|
||||
}
|
||||
if m == c_ISFIFO {
|
||||
// named pipe (FIFO)
|
||||
mode |= os.ModeNamedPipe
|
||||
}
|
||||
if m == c_ISLNK {
|
||||
// symbolic link
|
||||
mode |= os.ModeSymlink
|
||||
}
|
||||
if m == c_ISBLK {
|
||||
// device file
|
||||
mode |= os.ModeDevice
|
||||
}
|
||||
if m == c_ISCHR {
|
||||
// Unix character device
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
}
|
||||
if m == c_ISSOCK {
|
||||
// Unix domain socket
|
||||
mode |= os.ModeSocket
|
||||
}
|
||||
|
||||
switch fi.h.Typeflag {
|
||||
case TypeSymlink:
|
||||
// symbolic link
|
||||
mode |= os.ModeSymlink
|
||||
case TypeChar:
|
||||
// character device node
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
case TypeBlock:
|
||||
// block device node
|
||||
mode |= os.ModeDevice
|
||||
case TypeDir:
|
||||
// directory
|
||||
mode |= os.ModeDir
|
||||
case TypeFifo:
|
||||
// fifo node
|
||||
mode |= os.ModeNamedPipe
|
||||
}
|
||||
|
||||
return mode
|
||||
}
|
||||
|
||||
// sysStat, if non-nil, populates h from system-dependent fields of fi.
|
||||
var sysStat func(fi os.FileInfo, h *Header) error
|
||||
|
||||
// Mode constants from the tar spec.
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
// Keywords for the PAX Extended Header
|
||||
const (
|
||||
paxAtime = "atime"
|
||||
paxCharset = "charset"
|
||||
paxComment = "comment"
|
||||
paxCtime = "ctime" // please note that ctime is not a valid pax header.
|
||||
paxCreationTime = "LIBARCHIVE.creationtime"
|
||||
paxGid = "gid"
|
||||
paxGname = "gname"
|
||||
paxLinkpath = "linkpath"
|
||||
paxMtime = "mtime"
|
||||
paxPath = "path"
|
||||
paxSize = "size"
|
||||
paxUid = "uid"
|
||||
paxUname = "uname"
|
||||
paxXattr = "SCHILY.xattr."
|
||||
paxWindows = "MSWINDOWS."
|
||||
paxNone = ""
|
||||
)
|
||||
|
||||
// FileInfoHeader creates a partially-populated Header from fi.
|
||||
// If fi describes a symlink, FileInfoHeader records link as the link target.
|
||||
// If fi describes a directory, a slash is appended to the name.
|
||||
// Because os.FileInfo's Name method returns only the base name of
|
||||
// the file it describes, it may be necessary to modify the Name field
|
||||
// of the returned header to provide the full path name of the file.
|
||||
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
|
||||
if fi == nil {
|
||||
return nil, errors.New("tar: FileInfo is nil")
|
||||
}
|
||||
fm := fi.Mode()
|
||||
h := &Header{
|
||||
Name: fi.Name(),
|
||||
ModTime: fi.ModTime(),
|
||||
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
|
||||
}
|
||||
switch {
|
||||
case fm.IsRegular():
|
||||
h.Mode |= c_ISREG
|
||||
h.Typeflag = TypeReg
|
||||
h.Size = fi.Size()
|
||||
case fi.IsDir():
|
||||
h.Typeflag = TypeDir
|
||||
h.Mode |= c_ISDIR
|
||||
h.Name += "/"
|
||||
case fm&os.ModeSymlink != 0:
|
||||
h.Typeflag = TypeSymlink
|
||||
h.Mode |= c_ISLNK
|
||||
h.Linkname = link
|
||||
case fm&os.ModeDevice != 0:
|
||||
if fm&os.ModeCharDevice != 0 {
|
||||
h.Mode |= c_ISCHR
|
||||
h.Typeflag = TypeChar
|
||||
} else {
|
||||
h.Mode |= c_ISBLK
|
||||
h.Typeflag = TypeBlock
|
||||
}
|
||||
case fm&os.ModeNamedPipe != 0:
|
||||
h.Typeflag = TypeFifo
|
||||
h.Mode |= c_ISFIFO
|
||||
case fm&os.ModeSocket != 0:
|
||||
h.Mode |= c_ISSOCK
|
||||
default:
|
||||
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
|
||||
}
|
||||
if fm&os.ModeSetuid != 0 {
|
||||
h.Mode |= c_ISUID
|
||||
}
|
||||
if fm&os.ModeSetgid != 0 {
|
||||
h.Mode |= c_ISGID
|
||||
}
|
||||
if fm&os.ModeSticky != 0 {
|
||||
h.Mode |= c_ISVTX
|
||||
}
|
||||
// If possible, populate additional fields from OS-specific
|
||||
// FileInfo fields.
|
||||
if sys, ok := fi.Sys().(*Header); ok {
|
||||
// This FileInfo came from a Header (not the OS). Use the
|
||||
// original Header to populate all remaining fields.
|
||||
h.Uid = sys.Uid
|
||||
h.Gid = sys.Gid
|
||||
h.Uname = sys.Uname
|
||||
h.Gname = sys.Gname
|
||||
h.AccessTime = sys.AccessTime
|
||||
h.ChangeTime = sys.ChangeTime
|
||||
if sys.Xattrs != nil {
|
||||
h.Xattrs = make(map[string]string)
|
||||
for k, v := range sys.Xattrs {
|
||||
h.Xattrs[k] = v
|
||||
}
|
||||
}
|
||||
if sys.Typeflag == TypeLink {
|
||||
// hard link
|
||||
h.Typeflag = TypeLink
|
||||
h.Size = 0
|
||||
h.Linkname = sys.Linkname
|
||||
}
|
||||
}
|
||||
if sysStat != nil {
|
||||
return h, sysStat(fi, h)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
var zeroBlock = make([]byte, blockSize)
|
||||
|
||||
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
|
||||
// We compute and return both.
|
||||
func checksum(header []byte) (unsigned int64, signed int64) {
|
||||
for i := 0; i < len(header); i++ {
|
||||
if i == 148 {
|
||||
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
|
||||
unsigned += ' ' * 8
|
||||
signed += ' ' * 8
|
||||
i += 7
|
||||
continue
|
||||
}
|
||||
unsigned += int64(header[i])
|
||||
signed += int64(int8(header[i]))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type slicer []byte
|
||||
|
||||
func (sp *slicer) next(n int) (b []byte) {
|
||||
s := *sp
|
||||
b, *sp = s[0:n], s[n:]
|
||||
return
|
||||
}
|
||||
|
||||
func isASCII(s string) bool {
|
||||
for _, c := range s {
|
||||
if c >= 0x80 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toASCII(s string) string {
|
||||
if isASCII(s) {
|
||||
return s
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
for _, c := range s {
|
||||
if c < 0x80 {
|
||||
buf.WriteByte(byte(c))
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// isHeaderOnlyType checks if the given type flag is of the type that has no
|
||||
// data section even if a size is specified.
|
||||
func isHeaderOnlyType(flag byte) bool {
|
||||
switch flag {
|
||||
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
80
vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar_test
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
// Create a buffer to write our archive to.
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
// Create a new tar archive.
|
||||
tw := tar.NewWriter(buf)
|
||||
|
||||
// Add some files to the archive.
|
||||
var files = []struct {
|
||||
Name, Body string
|
||||
}{
|
||||
{"readme.txt", "This archive contains some text files."},
|
||||
{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
|
||||
{"todo.txt", "Get animal handling license."},
|
||||
}
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.Name,
|
||||
Mode: 0600,
|
||||
Size: int64(len(file.Body)),
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
if _, err := tw.Write([]byte(file.Body)); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
// Make sure to check the error on Close.
|
||||
if err := tw.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// Open the tar archive for reading.
|
||||
r := bytes.NewReader(buf.Bytes())
|
||||
tr := tar.NewReader(r)
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
// end of tar archive
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
fmt.Printf("Contents of %s:\n", hdr.Name)
|
||||
if _, err := io.Copy(os.Stdout, tr); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Contents of readme.txt:
|
||||
// This archive contains some text files.
|
||||
// Contents of gopher.txt:
|
||||
// Gopher names:
|
||||
// George
|
||||
// Geoffrey
|
||||
// Gonzo
|
||||
// Contents of todo.txt:
|
||||
// Get animal handling license.
|
||||
}
|
||||
1002
vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
generated
vendored
Normal file
File diff suppressed because it is too large
1125
vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
20
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux dragonfly openbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atim.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctim.Unix())
|
||||
}
|
||||
20
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd netbsd
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atimespec.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctimespec.Unix())
|
||||
}
|
||||
32
vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func init() {
|
||||
sysStat = statUnix
|
||||
}
|
||||
|
||||
func statUnix(fi os.FileInfo, h *Header) error {
|
||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
h.Uid = int(sys.Uid)
|
||||
h.Gid = int(sys.Gid)
|
||||
// TODO(bradfitz): populate username & group. os/user
|
||||
// doesn't cache LookupId lookups, and lacks group
|
||||
// lookup functions.
|
||||
h.AccessTime = statAtime(sys)
|
||||
h.ChangeTime = statCtime(sys)
|
||||
// TODO(bradfitz): major/minor device numbers?
|
||||
return nil
|
||||
}
|
||||
325
vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go
generated
vendored
Normal file
@@ -0,0 +1,325 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestFileInfoHeader(t *testing.T) {
|
||||
fi, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
h, err := FileInfoHeader(fi, "")
|
||||
if err != nil {
|
||||
t.Fatalf("FileInfoHeader: %v", err)
|
||||
}
|
||||
if g, e := h.Name, "small.txt"; g != e {
|
||||
t.Errorf("Name = %q; want %q", g, e)
|
||||
}
|
||||
if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
|
||||
t.Errorf("Mode = %#o; want %#o", g, e)
|
||||
}
|
||||
if g, e := h.Size, int64(5); g != e {
|
||||
t.Errorf("Size = %v; want %v", g, e)
|
||||
}
|
||||
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
||||
t.Errorf("ModTime = %v; want %v", g, e)
|
||||
}
|
||||
// FileInfoHeader should error when passing nil FileInfo
|
||||
if _, err := FileInfoHeader(nil, ""); err == nil {
|
||||
t.Fatalf("Expected error when passing nil to FileInfoHeader")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileInfoHeaderDir(t *testing.T) {
|
||||
fi, err := os.Stat("testdata")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
h, err := FileInfoHeader(fi, "")
|
||||
if err != nil {
|
||||
t.Fatalf("FileInfoHeader: %v", err)
|
||||
}
|
||||
if g, e := h.Name, "testdata/"; g != e {
|
||||
t.Errorf("Name = %q; want %q", g, e)
|
||||
}
|
||||
// Ignoring c_ISGID for golang.org/issue/4867
|
||||
if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
|
||||
t.Errorf("Mode = %#o; want %#o", g, e)
|
||||
}
|
||||
if g, e := h.Size, int64(0); g != e {
|
||||
t.Errorf("Size = %v; want %v", g, e)
|
||||
}
|
||||
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
||||
t.Errorf("ModTime = %v; want %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileInfoHeaderSymlink(t *testing.T) {
|
||||
h, err := FileInfoHeader(symlink{}, "some-target")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if g, e := h.Name, "some-symlink"; g != e {
|
||||
t.Errorf("Name = %q; want %q", g, e)
|
||||
}
|
||||
if g, e := h.Linkname, "some-target"; g != e {
|
||||
t.Errorf("Linkname = %q; want %q", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
type symlink struct{}
|
||||
|
||||
func (symlink) Name() string { return "some-symlink" }
|
||||
func (symlink) Size() int64 { return 0 }
|
||||
func (symlink) Mode() os.FileMode { return os.ModeSymlink }
|
||||
func (symlink) ModTime() time.Time { return time.Time{} }
|
||||
func (symlink) IsDir() bool { return false }
|
||||
func (symlink) Sys() interface{} { return nil }
|
||||
|
||||
func TestRoundTrip(t *testing.T) {
|
||||
data := []byte("some file contents")
|
||||
|
||||
var b bytes.Buffer
|
||||
tw := NewWriter(&b)
|
||||
hdr := &Header{
|
||||
Name: "file.txt",
|
||||
Uid: 1 << 21, // too big for 8 octal digits
|
||||
Size: int64(len(data)),
|
||||
ModTime: time.Now(),
|
||||
}
|
||||
// tar only supports second precision.
|
||||
hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
t.Fatalf("tw.WriteHeader: %v", err)
|
||||
}
|
||||
if _, err := tw.Write(data); err != nil {
|
||||
t.Fatalf("tw.Write: %v", err)
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Fatalf("tw.Close: %v", err)
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
tr := NewReader(&b)
|
||||
rHdr, err := tr.Next()
|
||||
if err != nil {
|
||||
t.Fatalf("tr.Next: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(rHdr, hdr) {
|
||||
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
|
||||
}
|
||||
rData, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatalf("Read: %v", err)
|
||||
}
|
||||
if !bytes.Equal(rData, data) {
|
||||
t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
|
||||
}
|
||||
}
|
||||
|
||||
type headerRoundTripTest struct {
|
||||
h *Header
|
||||
fm os.FileMode
|
||||
}
|
||||
|
||||
func TestHeaderRoundTrip(t *testing.T) {
|
||||
golden := []headerRoundTripTest{
|
||||
// regular file.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "test.txt",
|
||||
Mode: 0644 | c_ISREG,
|
||||
Size: 12,
|
||||
ModTime: time.Unix(1360600916, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0644,
|
||||
},
|
||||
// symbolic link.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "link.txt",
|
||||
Mode: 0777 | c_ISLNK,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360600852, 0),
|
||||
Typeflag: TypeSymlink,
|
||||
},
|
||||
fm: 0777 | os.ModeSymlink,
|
||||
},
|
||||
// character device node.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dev/null",
|
||||
Mode: 0666 | c_ISCHR,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360578951, 0),
|
||||
Typeflag: TypeChar,
|
||||
},
|
||||
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
|
||||
},
|
||||
// block device node.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dev/sda",
|
||||
Mode: 0660 | c_ISBLK,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360578954, 0),
|
||||
Typeflag: TypeBlock,
|
||||
},
|
||||
fm: 0660 | os.ModeDevice,
|
||||
},
|
||||
// directory.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dir/",
|
||||
Mode: 0755 | c_ISDIR,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360601116, 0),
|
||||
Typeflag: TypeDir,
|
||||
},
|
||||
fm: 0755 | os.ModeDir,
|
||||
},
|
||||
// fifo node.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dev/initctl",
|
||||
Mode: 0600 | c_ISFIFO,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360578949, 0),
|
||||
Typeflag: TypeFifo,
|
||||
},
|
||||
fm: 0600 | os.ModeNamedPipe,
|
||||
},
|
||||
// setuid.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "bin/su",
|
||||
Mode: 0755 | c_ISREG | c_ISUID,
|
||||
Size: 23232,
|
||||
ModTime: time.Unix(1355405093, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0755 | os.ModeSetuid,
|
||||
},
|
||||
// setguid.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "group.txt",
|
||||
Mode: 0750 | c_ISREG | c_ISGID,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360602346, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0750 | os.ModeSetgid,
|
||||
},
|
||||
// sticky.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "sticky.txt",
|
||||
Mode: 0600 | c_ISREG | c_ISVTX,
|
||||
Size: 7,
|
||||
ModTime: time.Unix(1360602540, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0600 | os.ModeSticky,
|
||||
},
|
||||
// hard link.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "hard.txt",
|
||||
Mode: 0644 | c_ISREG,
|
||||
Size: 0,
|
||||
Linkname: "file.txt",
|
||||
ModTime: time.Unix(1360600916, 0),
|
||||
Typeflag: TypeLink,
|
||||
},
|
||||
fm: 0644,
|
||||
},
|
||||
// More information.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "info.txt",
|
||||
Mode: 0600 | c_ISREG,
|
||||
Size: 0,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
ModTime: time.Unix(1360602540, 0),
|
||||
Uname: "slartibartfast",
|
||||
Gname: "users",
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0600,
|
||||
},
|
||||
}
|
||||
|
||||
for i, g := range golden {
|
||||
fi := g.h.FileInfo()
|
||||
h2, err := FileInfoHeader(fi, "")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(fi.Name(), "/") {
|
||||
t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
|
||||
}
|
||||
name := path.Base(g.h.Name)
|
||||
if fi.IsDir() {
|
||||
name += "/"
|
||||
}
|
||||
if got, want := h2.Name, name; got != want {
|
||||
t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Size, g.h.Size; got != want {
|
||||
t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Uid, g.h.Uid; got != want {
|
||||
t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
|
||||
}
|
||||
if got, want := h2.Gid, g.h.Gid; got != want {
|
||||
t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
|
||||
}
|
||||
if got, want := h2.Uname, g.h.Uname; got != want {
|
||||
t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
|
||||
}
|
||||
if got, want := h2.Gname, g.h.Gname; got != want {
|
||||
t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
|
||||
}
|
||||
if got, want := h2.Linkname, g.h.Linkname; got != want {
|
||||
t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Typeflag, g.h.Typeflag; got != want {
|
||||
t.Logf("%#v %#v", g.h, fi.Sys())
|
||||
t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
|
||||
}
|
||||
if got, want := h2.Mode, g.h.Mode; got != want {
|
||||
t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
|
||||
}
|
||||
if got, want := fi.Mode(), g.fm; got != want {
|
||||
t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
|
||||
}
|
||||
if got, want := h2.AccessTime, g.h.AccessTime; got != want {
|
||||
t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.ChangeTime, g.h.ChangeTime; got != want {
|
||||
t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.ModTime, g.h.ModTime; got != want {
|
||||
t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
|
||||
}
|
||||
if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
|
||||
t.Errorf("i=%d: Sys didn't return original *Header", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
444
vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
generated
vendored
Normal file
@@ -0,0 +1,444 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
// TODO(dsymonds):
|
||||
// - catch more errors (no first header, etc.)
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrWriteTooLong = errors.New("archive/tar: write too long")
|
||||
ErrFieldTooLong = errors.New("archive/tar: header field too long")
|
||||
ErrWriteAfterClose = errors.New("archive/tar: write after close")
|
||||
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
|
||||
)
|
||||
|
||||
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
|
||||
// A tar archive consists of a sequence of files.
|
||||
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
|
||||
// writing at most hdr.Size bytes in total.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
err error
|
||||
nb int64 // number of unwritten bytes for current file entry
|
||||
pad int64 // amount of padding to write after current file entry
|
||||
closed bool
|
||||
usedBinary bool // whether the binary numeric field extension was used
|
||||
preferPax bool // use pax header instead of binary numeric header
|
||||
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
|
||||
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
|
||||
}
|
||||
|
||||
type formatter struct {
|
||||
err error // Last error seen
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer writing to w.
|
||||
func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
|
||||
|
||||
// Flush finishes writing the current file (optional).
|
||||
func (tw *Writer) Flush() error {
|
||||
if tw.nb > 0 {
|
||||
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
n := tw.nb + tw.pad
|
||||
for n > 0 && tw.err == nil {
|
||||
nr := n
|
||||
if nr > blockSize {
|
||||
nr = blockSize
|
||||
}
|
||||
var nw int
|
||||
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
|
||||
n -= int64(nw)
|
||||
}
|
||||
tw.nb = 0
|
||||
tw.pad = 0
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// Write s into b, terminating it with a NUL if there is room.
|
||||
func (f *formatter) formatString(b []byte, s string) {
|
||||
if len(s) > len(b) {
|
||||
f.err = ErrFieldTooLong
|
||||
return
|
||||
}
|
||||
ascii := toASCII(s)
|
||||
copy(b, ascii)
|
||||
if len(ascii) < len(b) {
|
||||
b[len(ascii)] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Encode x as an octal ASCII string and write it into b with leading zeros.
|
||||
func (f *formatter) formatOctal(b []byte, x int64) {
|
||||
s := strconv.FormatInt(x, 8)
|
||||
// leading zeros, but leave room for a NUL.
|
||||
for len(s)+1 < len(b) {
|
||||
s = "0" + s
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
|
||||
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
|
||||
// encoding. Unlike octal encoding, base-256 encoding does not require that the
|
||||
// string ends with a NUL character. Thus, all n bytes are available for output.
|
||||
//
|
||||
// If operating in binary mode, this assumes strict GNU binary mode; which means
|
||||
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
|
||||
// equivalent to the sign bit in two's complement form.
|
||||
func fitsInBase256(n int, x int64) bool {
|
||||
var binBits = uint(n-1) * 8
|
||||
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
|
||||
}
|
||||
|
||||
// Write x into b, as binary (GNUtar/star extension).
|
||||
func (f *formatter) formatNumeric(b []byte, x int64) {
|
||||
if fitsInBase256(len(b), x) {
|
||||
for i := len(b) - 1; i >= 0; i-- {
|
||||
b[i] = byte(x)
|
||||
x >>= 8
|
||||
}
|
||||
b[0] |= 0x80 // Highest bit indicates binary format
|
||||
return
|
||||
}
|
||||
|
||||
f.formatOctal(b, 0) // Last resort, just write zero
|
||||
f.err = ErrFieldTooLong
|
||||
}
|
||||
|
||||
var (
|
||||
minTime = time.Unix(0, 0)
|
||||
// There is room for 11 octal digits (33 bits) of mtime.
|
||||
maxTime = minTime.Add((1<<33 - 1) * time.Second)
|
||||
)
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
func (tw *Writer) WriteHeader(hdr *Header) error {
|
||||
return tw.writeHeader(hdr, true)
|
||||
}
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
// As this method is called internally by writePax header to allow it to
|
||||
// suppress writing the pax header.
|
||||
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
|
||||
if tw.closed {
|
||||
return ErrWriteAfterClose
|
||||
}
|
||||
if tw.err == nil {
|
||||
tw.Flush()
|
||||
}
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// a map to hold pax header records, if any are needed
|
||||
paxHeaders := make(map[string]string)
|
||||
|
||||
// TODO(shanemhansen): we might want to use PAX headers for
|
||||
// subsecond time resolution, but for now let's just capture
|
||||
// too long fields or non ascii characters
|
||||
|
||||
var f formatter
|
||||
var header []byte
|
||||
|
||||
// We need to select which scratch buffer to use carefully,
|
||||
// since this method is called recursively to write PAX headers.
|
||||
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
|
||||
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
|
||||
// already being used by the non-recursive call, so we must use paxHdrBuff.
|
||||
header = tw.hdrBuff[:]
|
||||
if !allowPax {
|
||||
header = tw.paxHdrBuff[:]
|
||||
}
|
||||
copy(header, zeroBlock)
|
||||
s := slicer(header)
|
||||
|
||||
// Wrappers around formatter that automatically sets paxHeaders if the
|
||||
// argument extends beyond the capacity of the input byte slice.
|
||||
var formatString = func(b []byte, s string, paxKeyword string) {
|
||||
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
|
||||
if needsPaxHeader {
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
|
||||
// Try octal first.
|
||||
s := strconv.FormatInt(x, 8)
|
||||
if len(s) < len(b) {
|
||||
f.formatOctal(b, x)
|
||||
return
|
||||
}
|
||||
|
||||
// If it is too long for octal, and PAX is preferred, use a PAX header.
|
||||
if paxKeyword != paxNone && tw.preferPax {
|
||||
f.formatOctal(b, 0)
|
||||
s := strconv.FormatInt(x, 10)
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
|
||||
tw.usedBinary = true
|
||||
f.formatNumeric(b, x)
|
||||
}
|
||||
var formatTime = func(b []byte, t time.Time, paxKeyword string) {
|
||||
var unixTime int64
|
||||
if !t.Before(minTime) && !t.After(maxTime) {
|
||||
unixTime = t.Unix()
|
||||
}
|
||||
formatNumeric(b, unixTime, paxNone)
|
||||
|
||||
// Write a PAX header if the time didn't fit precisely.
|
||||
if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
|
||||
paxHeaders[paxKeyword] = formatPAXTime(t)
|
||||
}
|
||||
}
|
||||
|
||||
// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
||||
pathHeaderBytes := s.next(fileNameSize)
|
||||
|
||||
formatString(pathHeaderBytes, hdr.Name, paxPath)
|
||||
|
||||
f.formatOctal(s.next(8), hdr.Mode) // 100:108
|
||||
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
|
||||
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
|
||||
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
|
||||
formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148
|
||||
s.next(8) // chksum (148:156)
|
||||
s.next(1)[0] = hdr.Typeflag // 156:157
|
||||
|
||||
formatString(s.next(100), hdr.Linkname, paxLinkpath)
|
||||
|
||||
copy(s.next(8), []byte("ustar\x0000")) // 257:265
|
||||
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
|
||||
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
|
||||
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
|
||||
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
|
||||
|
||||
// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
||||
prefixHeaderBytes := s.next(155)
|
||||
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
|
||||
|
||||
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
|
||||
if tw.usedBinary {
|
||||
copy(header[257:265], []byte("ustar \x00"))
|
||||
}
|
||||
|
||||
_, paxPathUsed := paxHeaders[paxPath]
|
||||
// try to use a ustar header when only the name is too long
|
||||
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
|
||||
prefix, suffix, ok := splitUSTARPath(hdr.Name)
|
||||
if ok {
|
||||
// Since we can encode in USTAR format, disable PAX header.
|
||||
delete(paxHeaders, paxPath)
|
||||
|
||||
// Update the path fields
|
||||
formatString(pathHeaderBytes, suffix, paxNone)
|
||||
formatString(prefixHeaderBytes, prefix, paxNone)
|
||||
}
|
||||
}
|
||||
|
||||
// The chksum field is terminated by a NUL and a space.
|
||||
// This is different from the other octal fields.
|
||||
chksum, _ := checksum(header)
|
||||
f.formatOctal(header[148:155], chksum) // Never fails
|
||||
header[155] = ' '
|
||||
|
||||
// Check if there were any formatting errors.
|
||||
if f.err != nil {
|
||||
tw.err = f.err
|
||||
return tw.err
|
||||
}
|
||||
|
||||
if allowPax {
|
||||
if !hdr.AccessTime.IsZero() {
|
||||
paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
|
||||
}
|
||||
if !hdr.ChangeTime.IsZero() {
|
||||
paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
|
||||
}
|
||||
if !hdr.CreationTime.IsZero() {
|
||||
paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
|
||||
}
|
||||
for k, v := range hdr.Xattrs {
|
||||
paxHeaders[paxXattr+k] = v
|
||||
}
|
||||
for k, v := range hdr.Winheaders {
|
||||
paxHeaders[paxWindows+k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(paxHeaders) > 0 {
|
||||
if !allowPax {
|
||||
return errInvalidHeader
|
||||
}
|
||||
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
tw.nb = int64(hdr.Size)
|
||||
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
|
||||
|
||||
_, tw.err = tw.w.Write(header)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
func formatPAXTime(t time.Time) string {
|
||||
sec := t.Unix()
|
||||
usec := t.Nanosecond()
|
||||
s := strconv.FormatInt(sec, 10)
|
||||
if usec != 0 {
|
||||
s = fmt.Sprintf("%s.%09d", s, usec)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
|
||||
// If the path is not splittable, then it will return ("", "", false).
|
||||
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
|
||||
length := len(name)
|
||||
if length <= fileNameSize || !isASCII(name) {
|
||||
return "", "", false
|
||||
} else if length > fileNamePrefixSize+1 {
|
||||
length = fileNamePrefixSize + 1
|
||||
} else if name[length-1] == '/' {
|
||||
length--
|
||||
}
|
||||
|
||||
i := strings.LastIndex(name[:length], "/")
|
||||
nlen := len(name) - i - 1 // nlen is length of suffix
|
||||
plen := i // plen is length of prefix
|
||||
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
|
||||
return "", "", false
|
||||
}
|
||||
return name[:i], name[i+1:], true
|
||||
}
|
||||
|
||||
// writePaxHeader writes an extended pax header to the
|
||||
// archive.
|
||||
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
|
||||
// Prepare extended header
|
||||
ext := new(Header)
|
||||
ext.Typeflag = TypeXHeader
|
||||
// Setting ModTime is required for reader parsing to
|
||||
// succeed, and seems harmless enough.
|
||||
ext.ModTime = hdr.ModTime
|
||||
// The spec asks that we namespace our pseudo files
|
||||
// with the current pid. However, this results in differing outputs
|
||||
// for identical inputs. As such, the constant 0 is now used instead.
|
||||
// golang.org/issue/12358
|
||||
dir, file := path.Split(hdr.Name)
|
||||
fullName := path.Join(dir, "PaxHeaders.0", file)
|
||||
|
||||
ascii := toASCII(fullName)
|
||||
if len(ascii) > 100 {
|
||||
ascii = ascii[:100]
|
||||
}
|
||||
ext.Name = ascii
|
||||
// Construct the body
|
||||
var buf bytes.Buffer
|
||||
|
||||
// Keys are sorted before writing to body to allow deterministic output.
|
||||
var keys []string
|
||||
for k := range paxHeaders {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
|
||||
}
|
||||
|
||||
ext.Size = int64(len(buf.Bytes()))
|
||||
if err := tw.writeHeader(ext, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := tw.Write(buf.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// formatPAXRecord formats a single PAX record, prefixing it with the
|
||||
// appropriate length.
|
||||
func formatPAXRecord(k, v string) string {
|
||||
const padding = 3 // Extra padding for ' ', '=', and '\n'
|
||||
size := len(k) + len(v) + padding
|
||||
size += len(strconv.Itoa(size))
|
||||
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
|
||||
|
||||
// Final adjustment if adding size field increased the record size.
|
||||
if len(record) != size {
|
||||
size = len(record)
|
||||
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
|
||||
}
|
||||
return record
|
||||
}
|
||||
|
||||
// Write writes to the current entry in the tar archive.
|
||||
// Write returns the error ErrWriteTooLong if more than
|
||||
// hdr.Size bytes are written after WriteHeader.
|
||||
func (tw *Writer) Write(b []byte) (n int, err error) {
|
||||
if tw.closed {
|
||||
err = ErrWriteAfterClose
|
||||
return
|
||||
}
|
||||
overwrite := false
|
||||
if int64(len(b)) > tw.nb {
|
||||
b = b[0:tw.nb]
|
||||
overwrite = true
|
||||
}
|
||||
n, err = tw.w.Write(b)
|
||||
tw.nb -= int64(n)
|
||||
if err == nil && overwrite {
|
||||
err = ErrWriteTooLong
|
||||
return
|
||||
}
|
||||
tw.err = err
|
||||
return
|
||||
}
|
||||
|
||||
// Close closes the tar archive, flushing any unwritten
|
||||
// data to the underlying writer.
|
||||
func (tw *Writer) Close() error {
|
||||
if tw.err != nil || tw.closed {
|
||||
return tw.err
|
||||
}
|
||||
tw.Flush()
|
||||
tw.closed = true
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// trailer: two zero blocks
|
||||
for i := 0; i < 2; i++ {
|
||||
_, tw.err = tw.w.Write(zeroBlock)
|
||||
if tw.err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return tw.err
|
||||
}
|
||||
739
vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go
generated
vendored
Normal file
@@ -0,0 +1,739 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/iotest"
|
||||
"time"
|
||||
)
|
||||
|
||||
type writerTestEntry struct {
|
||||
header *Header
|
||||
contents string
|
||||
}
|
||||
|
||||
type writerTest struct {
|
||||
file string // filename of expected output
|
||||
entries []*writerTestEntry
|
||||
}
|
||||
|
||||
var writerTests = []*writerTest{
|
||||
// The writer test file was produced with this command:
|
||||
// tar (GNU tar) 1.26
|
||||
// ln -s small.txt link.txt
|
||||
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
|
||||
{
|
||||
file: "testdata/writer.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: "small.txt",
|
||||
Mode: 0640,
|
||||
Uid: 73025,
|
||||
Gid: 5000,
|
||||
Size: 5,
|
||||
ModTime: time.Unix(1246508266, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "dsymonds",
|
||||
Gname: "eng",
|
||||
},
|
||||
contents: "Kilts",
|
||||
},
|
||||
{
|
||||
header: &Header{
|
||||
Name: "small2.txt",
|
||||
Mode: 0640,
|
||||
Uid: 73025,
|
||||
Gid: 5000,
|
||||
Size: 11,
|
||||
ModTime: time.Unix(1245217492, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "dsymonds",
|
||||
Gname: "eng",
|
||||
},
|
||||
contents: "Google.com\n",
|
||||
},
|
||||
{
|
||||
header: &Header{
|
||||
Name: "link.txt",
|
||||
Mode: 0777,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1314603082, 0),
|
||||
Typeflag: '2',
|
||||
Linkname: "small.txt",
|
||||
Uname: "strings",
|
||||
Gname: "strings",
|
||||
},
|
||||
// no contents
|
||||
},
|
||||
},
|
||||
},
|
||||
// The truncated test file was produced using these commands:
|
||||
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
|
||||
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
|
||||
{
|
||||
file: "testdata/writer-big.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: "tmp/16gig.txt",
|
||||
Mode: 0640,
|
||||
Uid: 73025,
|
||||
Gid: 5000,
|
||||
Size: 16 << 30,
|
||||
ModTime: time.Unix(1254699560, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "dsymonds",
|
||||
Gname: "eng",
|
||||
},
|
||||
// fake contents
|
||||
contents: strings.Repeat("\x00", 4<<10),
|
||||
},
|
||||
},
|
||||
},
|
||||
// The truncated test file was produced using these commands:
|
||||
// dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
|
||||
// tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
|
||||
{
|
||||
file: "testdata/writer-big-long.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: strings.Repeat("longname/", 15) + "16gig.txt",
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
Size: 16 << 30,
|
||||
ModTime: time.Unix(1399583047, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "guillaume",
|
||||
Gname: "guillaume",
|
||||
},
|
||||
// fake contents
|
||||
contents: strings.Repeat("\x00", 4<<10),
|
||||
},
|
||||
},
|
||||
},
|
||||
// This file was produced using gnu tar 1.17
|
||||
// gnutar -b 4 --format=ustar (longname/)*15 + file.txt
|
||||
{
|
||||
file: "testdata/ustar.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: strings.Repeat("longname/", 15) + "file.txt",
|
||||
Mode: 0644,
|
||||
Uid: 0765,
|
||||
Gid: 024,
|
||||
Size: 06,
|
||||
ModTime: time.Unix(1360135598, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "shane",
|
||||
Gname: "staff",
|
||||
},
|
||||
contents: "hello\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
// This file was produced using gnu tar 1.26
|
||||
// echo "Slartibartfast" > file.txt
|
||||
// ln file.txt hard.txt
|
||||
// tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
|
||||
{
|
||||
file: "testdata/hardlink.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: "file.txt",
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 100,
|
||||
Size: 15,
|
||||
ModTime: time.Unix(1425484303, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "vbatts",
|
||||
Gname: "users",
|
||||
},
|
||||
contents: "Slartibartfast\n",
|
||||
},
|
||||
{
|
||||
header: &Header{
|
||||
Name: "hard.txt",
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 100,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1425484303, 0),
|
||||
Typeflag: '1',
|
||||
Linkname: "file.txt",
|
||||
Uname: "vbatts",
|
||||
Gname: "users",
|
||||
},
|
||||
// no contents
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
|
||||
func bytestr(offset int, b []byte) string {
|
||||
const rowLen = 32
|
||||
s := fmt.Sprintf("%04x ", offset)
|
||||
for _, ch := range b {
|
||||
switch {
|
||||
case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
|
||||
s += fmt.Sprintf(" %c", ch)
|
||||
default:
|
||||
s += fmt.Sprintf(" %02x", ch)
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Render a pseudo-diff between two blocks of bytes.
|
||||
func bytediff(a []byte, b []byte) string {
|
||||
const rowLen = 32
|
||||
s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
|
||||
for offset := 0; len(a)+len(b) > 0; offset += rowLen {
|
||||
na, nb := rowLen, rowLen
|
||||
if na > len(a) {
|
||||
na = len(a)
|
||||
}
|
||||
if nb > len(b) {
|
||||
nb = len(b)
|
||||
}
|
||||
sa := bytestr(offset, a[0:na])
|
||||
sb := bytestr(offset, b[0:nb])
|
||||
if sa != sb {
|
||||
s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
|
||||
}
|
||||
a = a[na:]
|
||||
b = b[nb:]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func TestWriter(t *testing.T) {
|
||||
testLoop:
|
||||
for i, test := range writerTests {
|
||||
expected, err := ioutil.ReadFile(test.file)
|
||||
if err != nil {
|
||||
t.Errorf("test %d: Unexpected error: %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
|
||||
big := false
|
||||
for j, entry := range test.entries {
|
||||
big = big || entry.header.Size > 1<<10
|
||||
if err := tw.WriteHeader(entry.header); err != nil {
|
||||
t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
|
||||
continue testLoop
|
||||
}
|
||||
if _, err := io.WriteString(tw, entry.contents); err != nil {
|
||||
t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
|
||||
continue testLoop
|
||||
}
|
||||
}
|
||||
// Only interested in Close failures for the small tests.
|
||||
if err := tw.Close(); err != nil && !big {
|
||||
t.Errorf("test %d: Failed closing archive: %v", i, err)
|
||||
continue testLoop
|
||||
}
|
||||
|
||||
actual := buf.Bytes()
|
||||
if !bytes.Equal(expected, actual) {
|
||||
t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
|
||||
i, bytediff(expected, actual))
|
||||
}
|
||||
if testing.Short() { // The second test is expensive.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPax(t *testing.T) {
|
||||
// Create an archive with a large name
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat: %v", err)
|
||||
}
|
||||
// Force a PAX long name to be written
|
||||
longName := strings.Repeat("ab", 100)
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
hdr.Name = longName
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != longName {
|
||||
t.Fatal("Couldn't recover long file name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxSymlink(t *testing.T) {
|
||||
// Create an archive with a large linkname
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeSymlink
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
// Force a PAX long linkname to be written
|
||||
longLinkname := strings.Repeat("1234567890/1234567890", 10)
|
||||
hdr.Linkname = longLinkname
|
||||
|
||||
hdr.Size = 0
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Linkname != longLinkname {
|
||||
t.Fatal("Couldn't recover long link name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxNonAscii(t *testing.T) {
|
||||
// Create an archive with non-ASCII names. These should trigger a PAX header
// because PAX headers have a defined UTF-8 encoding.
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
|
||||
// some sample data
|
||||
chineseFilename := "文件名"
|
||||
chineseGroupname := "組"
|
||||
chineseUsername := "用戶名"
|
||||
|
||||
hdr.Name = chineseFilename
|
||||
hdr.Gname = chineseGroupname
|
||||
hdr.Uname = chineseUsername
|
||||
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != chineseFilename {
|
||||
t.Fatal("Couldn't recover unicode name")
|
||||
}
|
||||
if hdr.Gname != chineseGroupname {
|
||||
t.Fatal("Couldn't recover unicode group")
|
||||
}
|
||||
if hdr.Uname != chineseUsername {
|
||||
t.Fatal("Couldn't recover unicode user")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxXattrs(t *testing.T) {
|
||||
xattrs := map[string]string{
|
||||
"user.key": "value",
|
||||
}
|
||||
|
||||
// Create an archive with an xattr
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat: %v", err)
|
||||
}
|
||||
contents := "Kilts"
|
||||
hdr.Xattrs = xattrs
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Test that we can get the xattrs back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
|
||||
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
|
||||
hdr.Xattrs, xattrs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxHeadersSorted(t *testing.T) {
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat: %v", err)
|
||||
}
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
|
||||
hdr.Xattrs = map[string]string{
|
||||
"foo": "foo",
|
||||
"bar": "bar",
|
||||
"baz": "baz",
|
||||
"qux": "qux",
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
|
||||
// xattr bar should always appear before others
|
||||
indices := []int{
|
||||
bytes.Index(buf.Bytes(), []byte("bar=bar")),
|
||||
bytes.Index(buf.Bytes(), []byte("baz=baz")),
|
||||
bytes.Index(buf.Bytes(), []byte("foo=foo")),
|
||||
bytes.Index(buf.Bytes(), []byte("qux=qux")),
|
||||
}
|
||||
if !sort.IntsAreSorted(indices) {
|
||||
t.Fatal("PAX headers are not sorted")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUSTARLongName(t *testing.T) {
|
||||
// Create an archive with a path that failed to split with USTAR extension in previous versions.
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeDir
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
// Force a PAX long name to be written. The name was taken from a practical example
// that failed, with every character replaced by a digit to anonymize the sample.
|
||||
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
|
||||
hdr.Name = longName
|
||||
|
||||
hdr.Size = 0
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != longName {
|
||||
t.Fatal("Couldn't recover long name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidTypeflagWithPAXHeader(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
tw := NewWriter(&buffer)
|
||||
|
||||
fileName := strings.Repeat("ab", 100)
|
||||
|
||||
hdr := &Header{
|
||||
Name: fileName,
|
||||
Size: 4,
|
||||
Typeflag: 0,
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
t.Fatalf("Failed to write header: %s", err)
|
||||
}
|
||||
if _, err := tw.Write([]byte("fooo")); err != nil {
|
||||
t.Fatalf("Failed to write the file's data: %s", err)
|
||||
}
|
||||
tw.Close()
|
||||
|
||||
tr := NewReader(&buffer)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read header: %s", err)
|
||||
}
|
||||
if header.Typeflag != 0 {
|
||||
t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteAfterClose(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
tw := NewWriter(&buffer)
|
||||
|
||||
hdr := &Header{
|
||||
Name: "small.txt",
|
||||
Size: 5,
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
t.Fatalf("Failed to write header: %s", err)
|
||||
}
|
||||
tw.Close()
|
||||
if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
|
||||
t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitUSTARPath(t *testing.T) {
|
||||
var sr = strings.Repeat
|
||||
|
||||
var vectors = []struct {
|
||||
input string // Input path
|
||||
prefix string // Expected output prefix
|
||||
suffix string // Expected output suffix
|
||||
ok bool // Split success?
|
||||
}{
|
||||
{"", "", "", false},
|
||||
{"abc", "", "", false},
|
||||
{"用戶名", "", "", false},
|
||||
{sr("a", fileNameSize), "", "", false},
|
||||
{sr("a", fileNameSize) + "/", "", "", false},
|
||||
{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
|
||||
{sr("a", fileNamePrefixSize) + "/", "", "", false},
|
||||
{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
|
||||
{sr("a", fileNameSize+1), "", "", false},
|
||||
{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
|
||||
{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
|
||||
sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
|
||||
{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
|
||||
{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
prefix, suffix, ok := splitUSTARPath(v.input)
|
||||
if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
|
||||
t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
|
||||
v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatPAXRecord(t *testing.T) {
|
||||
var medName = strings.Repeat("CD", 50)
|
||||
var longName = strings.Repeat("AB", 100)
|
||||
|
||||
var vectors = []struct {
|
||||
inputKey string
|
||||
inputVal string
|
||||
output string
|
||||
}{
|
||||
{"k", "v", "6 k=v\n"},
|
||||
{"path", "/etc/hosts", "19 path=/etc/hosts\n"},
|
||||
{"path", longName, "210 path=" + longName + "\n"},
|
||||
{"path", medName, "110 path=" + medName + "\n"},
|
||||
{"foo", "ba", "9 foo=ba\n"},
|
||||
{"foo", "bar", "11 foo=bar\n"},
|
||||
{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
|
||||
{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
|
||||
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
|
||||
{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
output := formatPAXRecord(v.inputKey, v.inputVal)
|
||||
if output != v.output {
|
||||
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
|
||||
v.inputKey, v.inputVal, output, v.output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFitsInBase256(t *testing.T) {
|
||||
var vectors = []struct {
|
||||
input int64
|
||||
width int
|
||||
ok bool
|
||||
}{
|
||||
{+1, 8, true},
|
||||
{0, 8, true},
|
||||
{-1, 8, true},
|
||||
{1 << 56, 8, false},
|
||||
{(1 << 56) - 1, 8, true},
|
||||
{-1 << 56, 8, true},
|
||||
{(-1 << 56) - 1, 8, false},
|
||||
{121654, 8, true},
|
||||
{-9849849, 8, true},
|
||||
{math.MaxInt64, 9, true},
|
||||
{0, 9, true},
|
||||
{math.MinInt64, 9, true},
|
||||
{math.MaxInt64, 12, true},
|
||||
{0, 12, true},
|
||||
{math.MinInt64, 12, true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
ok := fitsInBase256(v.width, v.input)
|
||||
if ok != v.ok {
|
||||
t.Errorf("checkNumeric(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatNumeric(t *testing.T) {
|
||||
var vectors = []struct {
|
||||
input int64
|
||||
output string
|
||||
ok bool
|
||||
}{
|
||||
// Test base-256 (binary) encoded values.
|
||||
{-1, "\xff", true},
|
||||
{-1, "\xff\xff", true},
|
||||
{-1, "\xff\xff\xff", true},
|
||||
{(1 << 0), "0", false},
|
||||
{(1 << 8) - 1, "\x80\xff", true},
|
||||
{(1 << 8), "0\x00", false},
|
||||
{(1 << 16) - 1, "\x80\xff\xff", true},
|
||||
{(1 << 16), "00\x00", false},
|
||||
{-1 * (1 << 0), "\xff", true},
|
||||
{-1*(1<<0) - 1, "0", false},
|
||||
{-1 * (1 << 8), "\xff\x00", true},
|
||||
{-1*(1<<8) - 1, "0\x00", false},
|
||||
{-1 * (1 << 16), "\xff\x00\x00", true},
|
||||
{-1*(1<<16) - 1, "00\x00", false},
|
||||
{537795476381659745, "0000000\x00", false},
|
||||
{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
|
||||
{-615126028225187231, "0000000\x00", false},
|
||||
{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
|
||||
{math.MaxInt64, "0000000\x00", false},
|
||||
{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
|
||||
{math.MinInt64, "0000000\x00", false},
|
||||
{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
|
||||
{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
|
||||
{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
var f formatter
|
||||
output := make([]byte, len(v.output))
|
||||
f.formatNumeric(output, v.input)
|
||||
ok := (f.err == nil)
|
||||
if ok != v.ok {
|
||||
if v.ok {
|
||||
t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input)
|
||||
} else {
|
||||
t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input)
|
||||
}
|
||||
}
|
||||
if string(output) != v.output {
|
||||
t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatPAXTime(t *testing.T) {
|
||||
t1 := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC)
|
||||
t2 := time.Date(2000, 1, 1, 11, 0, 0, 100, time.UTC)
|
||||
t3 := time.Date(1960, 1, 1, 11, 0, 0, 0, time.UTC)
|
||||
t4 := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
verify := func(time time.Time, s string) {
|
||||
p := formatPAXTime(time)
|
||||
if p != s {
|
||||
t.Errorf("for %v, expected %s, got %s", time, s, p)
|
||||
}
|
||||
}
|
||||
verify(t1, "946724400")
|
||||
verify(t2, "946724400.000000100")
|
||||
verify(t3, "-315579600")
|
||||
verify(t4, "0")
|
||||
}
|
||||
280 vendor/github.com/Microsoft/go-winio/backup.go generated vendored Normal file
@@ -0,0 +1,280 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
)
|
||||
|
||||
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||
|
||||
const (
|
||||
BackupData = uint32(iota + 1)
|
||||
BackupEaData
|
||||
BackupSecurity
|
||||
BackupAlternateData
|
||||
BackupLink
|
||||
BackupPropertyData
|
||||
BackupObjectId
|
||||
BackupReparseData
|
||||
BackupSparseBlock
|
||||
BackupTxfsData
|
||||
)
|
||||
|
||||
const (
|
||||
StreamSparseAttributes = uint32(8)
|
||||
)
|
||||
|
||||
const (
|
||||
WRITE_DAC = 0x40000
|
||||
WRITE_OWNER = 0x80000
|
||||
ACCESS_SYSTEM_SECURITY = 0x1000000
|
||||
)
|
||||
|
||||
// BackupHeader represents a backup stream of a file.
|
||||
type BackupHeader struct {
|
||||
Id uint32 // The backup stream ID
|
||||
Attributes uint32 // Stream attributes
|
||||
Size int64 // The size of the stream in bytes
|
||||
Name string // The name of the stream (for BackupAlternateData only).
|
||||
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
|
||||
}
|
||||
|
||||
type win32StreamId struct {
|
||||
StreamId uint32
|
||||
Attributes uint32
|
||||
Size uint64
|
||||
NameSize uint32
|
||||
}
|
||||
|
||||
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
|
||||
// of BackupHeader values.
|
||||
type BackupStreamReader struct {
|
||||
r io.Reader
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
|
||||
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
|
||||
return &BackupStreamReader{r, 0}
|
||||
}
|
||||
|
||||
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
|
||||
// it was not completely read.
|
||||
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
if r.bytesLeft > 0 {
|
||||
if s, ok := r.r.(io.Seeker); ok {
|
||||
// Check that a zero-offset Seek with io.SeekCurrent succeeds
// before attempting the actual seek past the remaining bytes.
|
||||
if _, err := s.Seek(0, io.SeekCurrent); err == nil {
|
||||
if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.bytesLeft = 0
|
||||
}
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var wsi win32StreamId
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr := &BackupHeader{
|
||||
Id: wsi.StreamId,
|
||||
Attributes: wsi.Attributes,
|
||||
Size: int64(wsi.Size),
|
||||
}
|
||||
if wsi.NameSize != 0 {
|
||||
name := make([]uint16, int(wsi.NameSize/2))
|
||||
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Name = syscall.UTF16ToString(name)
|
||||
}
|
||||
if wsi.StreamId == BackupSparseBlock {
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Size -= 8
|
||||
}
|
||||
r.bytesLeft = hdr.Size
|
||||
return hdr, nil
|
||||
}
|
||||
|
||||
// Read reads from the current backup stream.
|
||||
func (r *BackupStreamReader) Read(b []byte) (int, error) {
|
||||
if r.bytesLeft == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if int64(len(b)) > r.bytesLeft {
|
||||
b = b[:r.bytesLeft]
|
||||
}
|
||||
n, err := r.r.Read(b)
|
||||
r.bytesLeft -= int64(n)
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
} else if r.bytesLeft == 0 && err == nil {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
|
||||
type BackupStreamWriter struct {
|
||||
w io.Writer
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
|
||||
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
|
||||
return &BackupStreamWriter{w, 0}
|
||||
}
|
||||
|
||||
// WriteHeader writes the next backup stream header and prepares for calls to Write().
|
||||
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
|
||||
if w.bytesLeft != 0 {
|
||||
return fmt.Errorf("missing %d bytes", w.bytesLeft)
|
||||
}
|
||||
name := utf16.Encode([]rune(hdr.Name))
|
||||
wsi := win32StreamId{
|
||||
StreamId: hdr.Id,
|
||||
Attributes: hdr.Attributes,
|
||||
Size: uint64(hdr.Size),
|
||||
NameSize: uint32(len(name) * 2),
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
// Include space for the int64 block offset
|
||||
wsi.Size += 8
|
||||
}
|
||||
if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(name) != 0 {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.bytesLeft = hdr.Size
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes to the current backup stream.
|
||||
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
|
||||
if w.bytesLeft < int64(len(b)) {
|
||||
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
|
||||
}
|
||||
n, err := w.w.Write(b)
|
||||
w.bytesLeft -= int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
|
||||
type BackupFileReader struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
|
||||
// Read will attempt to read the security descriptor of the file.
|
||||
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
|
||||
r := &BackupFileReader{f, includeSecurity, 0}
|
||||
return r
|
||||
}
|
||||
|
||||
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
|
||||
func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
var bytesRead uint32
|
||||
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(r.f)
|
||||
if bytesRead == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(bytesRead), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileReader. It does not close
|
||||
// the underlying file.
|
||||
func (r *BackupFileReader) Close() error {
|
||||
if r.ctx != 0 {
|
||||
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
runtime.KeepAlive(r.f)
|
||||
r.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
|
||||
type BackupFileWriter struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
|
||||
// Write() will attempt to restore the security descriptor from the stream.
|
||||
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
|
||||
w := &BackupFileWriter{f, includeSecurity, 0}
|
||||
return w
|
||||
}
|
||||
|
||||
// Write restores a portion of the file using the provided backup stream.
|
||||
func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
var bytesWritten uint32
|
||||
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(w.f)
|
||||
if int(bytesWritten) != len(b) {
|
||||
return int(bytesWritten), errors.New("not all bytes could be written")
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileWriter. It does not
|
||||
// close the underlying file.
|
||||
func (w *BackupFileWriter) Close() error {
|
||||
if w.ctx != 0 {
|
||||
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
runtime.KeepAlive(w.f)
|
||||
w.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
|
||||
// or restore privileges have been acquired.
|
||||
//
|
||||
// If the file opened was a directory, it cannot be used with Readdir().
|
||||
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
|
||||
winPath, err := syscall.UTF16FromString(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "open", Path: path, Err: err}
|
||||
return nil, err
|
||||
}
|
||||
return os.NewFile(uintptr(h), path), nil
|
||||
}
|
||||
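Putting the reader types above together, a minimal sketch of enumerating a file's backup streams (Windows only; the os.Open call and the example path are assumptions, and OpenForBackup above is the variant to use when backup privileges have been acquired):

// +build windows

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"

    "github.com/Microsoft/go-winio"
)

// listStreams prints every backup stream of a file: data, alternate data
// streams, security descriptor, reparse data, and so on.
func listStreams(path string) error {
    f, err := os.Open(path)
    if err != nil {
        return err
    }
    defer f.Close()

    r := winio.NewBackupFileReader(f, false) // false: skip the security descriptor
    defer r.Close()

    br := winio.NewBackupStreamReader(r)
    for {
        hdr, err := br.Next()
        if err == io.EOF {
            return nil
        }
        if err != nil {
            return err
        }
        b, err := ioutil.ReadAll(br) // reads at most hdr.Size bytes of this stream
        if err != nil {
            return err
        }
        fmt.Printf("stream id=%d name=%q size=%d read=%d\n", hdr.Id, hdr.Name, hdr.Size, len(b))
    }
}

func main() {
    if err := listStreams(`C:\Windows\notepad.exe`); err != nil { // example path only
        fmt.Println(err)
    }
}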
255 vendor/github.com/Microsoft/go-winio/backup_test.go generated vendored Normal file
@@ -0,0 +1,255 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testFileName string
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
f, err := ioutil.TempFile("", "tmp")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
testFileName = f.Name()
|
||||
f.Close()
|
||||
defer os.Remove(testFileName)
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func makeTestFile(makeADS bool) error {
|
||||
os.Remove(testFileName)
|
||||
f, err := os.Create(testFileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = f.Write([]byte("testing 1 2 3\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if makeADS {
|
||||
a, err := os.Create(testFileName + ":ads.txt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer a.Close()
|
||||
_, err = a.Write([]byte("alternate data stream\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestBackupRead(t *testing.T) {
|
||||
err := makeTestFile(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewBackupFileReader(f, false)
|
||||
defer r.Close()
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(b) == 0 {
|
||||
t.Fatal("no data")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupStreamRead(t *testing.T) {
|
||||
err := makeTestFile(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewBackupFileReader(f, false)
|
||||
defer r.Close()
|
||||
|
||||
br := NewBackupStreamReader(r)
|
||||
gotData := false
|
||||
gotAltData := false
|
||||
for {
|
||||
hdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
switch hdr.Id {
|
||||
case BackupData:
|
||||
if gotData {
|
||||
t.Fatal("duplicate data")
|
||||
}
|
||||
if hdr.Name != "" {
|
||||
t.Fatalf("unexpected name %s", hdr.Name)
|
||||
}
|
||||
b, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != "testing 1 2 3\n" {
|
||||
t.Fatalf("incorrect data %v", b)
|
||||
}
|
||||
gotData = true
|
||||
case BackupAlternateData:
|
||||
if gotAltData {
|
||||
t.Fatal("duplicate alt data")
|
||||
}
|
||||
if hdr.Name != ":ads.txt:$DATA" {
|
||||
t.Fatalf("incorrect name %s", hdr.Name)
|
||||
}
|
||||
b, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != "alternate data stream\n" {
|
||||
t.Fatalf("incorrect data %v", b)
|
||||
}
|
||||
gotAltData = true
|
||||
default:
|
||||
t.Fatalf("unknown stream ID %d", hdr.Id)
|
||||
}
|
||||
}
|
||||
if !gotData || !gotAltData {
|
||||
t.Fatal("missing stream")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupStreamWrite(t *testing.T) {
|
||||
f, err := os.Create(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
w := NewBackupFileWriter(f, false)
|
||||
defer w.Close()
|
||||
|
||||
data := "testing 1 2 3\n"
|
||||
altData := "alternate stream\n"
|
||||
|
||||
br := NewBackupStreamWriter(w)
|
||||
err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := br.Write([]byte(data))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != len(data) {
|
||||
t.Fatal("short write")
|
||||
}
|
||||
|
||||
err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = br.Write([]byte(altData))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != len(altData) {
|
||||
t.Fatal("short write")
|
||||
}
|
||||
|
||||
f.Close()
|
||||
|
||||
b, err := ioutil.ReadFile(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != data {
|
||||
t.Fatalf("wrong data %v", b)
|
||||
}
|
||||
|
||||
b, err = ioutil.ReadFile(testFileName + ":ads.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != altData {
|
||||
t.Fatalf("wrong data %v", b)
|
||||
}
|
||||
}
|
||||
|
||||
func makeSparseFile() error {
|
||||
os.Remove(testFileName)
|
||||
f, err := os.Create(testFileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
const (
|
||||
FSCTL_SET_SPARSE = 0x000900c4
|
||||
FSCTL_SET_ZERO_DATA = 0x000980c8
|
||||
)
|
||||
|
||||
err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Write([]byte("testing 1 2 3\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Seek(1000000, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Write([]byte("more data later\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestBackupSparseFile(t *testing.T) {
|
||||
err := makeSparseFile()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewBackupFileReader(f, false)
|
||||
defer r.Close()
|
||||
|
||||
br := NewBackupStreamReader(r)
|
||||
for {
|
||||
hdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Log(hdr)
|
||||
}
|
||||
}
|
||||
4 vendor/github.com/Microsoft/go-winio/backuptar/noop.go generated vendored Normal file
@@ -0,0 +1,4 @@
|
||||
// +build !windows
|
||||
// This file only exists to allow go get on non-Windows platforms.
|
||||
|
||||
package backuptar
|
||||
439 vendor/github.com/Microsoft/go-winio/backuptar/tar.go generated vendored Normal file
@@ -0,0 +1,439 @@
|
||||
// +build windows
|
||||
|
||||
package backuptar
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
|
||||
)
|
||||
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
const (
|
||||
hdrFileAttributes = "fileattr"
|
||||
hdrSecurityDescriptor = "sd"
|
||||
hdrRawSecurityDescriptor = "rawsd"
|
||||
hdrMountPoint = "mountpoint"
|
||||
hdrEaPrefix = "xattr."
|
||||
)
|
||||
|
||||
func writeZeroes(w io.Writer, count int64) error {
|
||||
buf := make([]byte, 8192)
|
||||
c := len(buf)
|
||||
for i := int64(0); i < count; i += int64(c) {
|
||||
if int64(c) > count-i {
|
||||
c = int(count - i)
|
||||
}
|
||||
_, err := w.Write(buf[:c])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
|
||||
curOffset := int64(0)
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bhdr.Id != winio.BackupSparseBlock {
|
||||
return fmt.Errorf("unexpected stream %d", bhdr.Id)
|
||||
}
|
||||
|
||||
// archive/tar does not support writing sparse files
|
||||
// so just write zeroes to catch up to the current offset.
|
||||
err = writeZeroes(t, bhdr.Offset-curOffset)
if err != nil {
return err
}
if bhdr.Size == 0 {
|
||||
break
|
||||
}
|
||||
n, err := io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
curOffset = bhdr.Offset + n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BasicInfoHeader creates a tar header from basic file information.
|
||||
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
|
||||
hdr := &tar.Header{
|
||||
Name: filepath.ToSlash(name),
|
||||
Size: size,
|
||||
Typeflag: tar.TypeReg,
|
||||
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
|
||||
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
|
||||
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
|
||||
CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
|
||||
Winheaders: make(map[string]string),
|
||||
}
|
||||
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
|
||||
|
||||
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
|
||||
hdr.Mode |= c_ISDIR
|
||||
hdr.Size = 0
|
||||
hdr.Typeflag = tar.TypeDir
|
||||
}
|
||||
return hdr
|
||||
}
|
||||
|
||||
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
|
||||
//
|
||||
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
|
||||
//
|
||||
// The additional Win32 metadata is:
|
||||
//
|
||||
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
|
||||
//
|
||||
// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
|
||||
//
|
||||
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
|
||||
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
|
||||
name = filepath.ToSlash(name)
|
||||
hdr := BasicInfoHeader(name, size, fileInfo)
|
||||
|
||||
// If r is seekable, then this function is two-pass: pass 1 collects the
// tar header data, and pass 2 copies the data stream. If r is not
// seekable, then some header data (in particular EAs) will be silently lost.
|
||||
var (
|
||||
restartPos int64
|
||||
err error
|
||||
)
|
||||
sr, readTwice := r.(io.Seeker)
|
||||
if readTwice {
|
||||
if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
|
||||
readTwice = false
|
||||
}
|
||||
}
|
||||
|
||||
br := winio.NewBackupStreamReader(r)
|
||||
var dataHdr *winio.BackupHeader
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupData:
|
||||
hdr.Mode |= c_ISREG
|
||||
if !readTwice {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
case winio.BackupSecurity:
|
||||
sd, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
|
||||
|
||||
case winio.BackupReparseData:
|
||||
hdr.Mode |= c_ISLNK
|
||||
hdr.Typeflag = tar.TypeSymlink
|
||||
reparseBuffer, err := ioutil.ReadAll(br)
if err != nil {
return err
}
rp, err := winio.DecodeReparsePoint(reparseBuffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rp.IsMountPoint {
|
||||
hdr.Winheaders[hdrMountPoint] = "1"
|
||||
}
|
||||
hdr.Linkname = rp.Target
|
||||
|
||||
case winio.BackupEaData:
|
||||
eab, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eas, err := winio.DecodeExtendedAttributes(eab)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ea := range eas {
|
||||
// Use base64 encoding for the binary value. Note that there
|
||||
// is no way to encode the EA's flags, since their use doesn't
|
||||
// make any sense for persisted EAs.
|
||||
hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
|
||||
}
|
||||
|
||||
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
|
||||
}
|
||||
}
|
||||
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if readTwice {
|
||||
// Get back to the data stream.
|
||||
if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bhdr.Id == winio.BackupData {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if dataHdr != nil {
|
||||
// A data stream was found. Copy the data.
|
||||
if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
if size != dataHdr.Size {
|
||||
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = copySparse(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Look for streams after the data stream. The only ones we handle are alternate data streams.
|
||||
// Other streams may have metadata that could be serialized, but the tar header has already
|
||||
// been written. In practice, this means that we don't get EA or TXF metadata.
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupAlternateData:
|
||||
altName := bhdr.Name
|
||||
if strings.HasSuffix(altName, ":$DATA") {
|
||||
altName = altName[:len(altName)-len(":$DATA")]
|
||||
}
|
||||
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
hdr = &tar.Header{
|
||||
Name: name + altName,
|
||||
Mode: hdr.Mode,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: bhdr.Size,
|
||||
ModTime: hdr.ModTime,
|
||||
AccessTime: hdr.AccessTime,
|
||||
ChangeTime: hdr.ChangeTime,
|
||||
}
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
} else {
|
||||
// Unsupported for now, since the size of the alternate stream is not present
|
||||
// in the backup stream until after the data has been read.
|
||||
return errors.New("tar of sparse alternate data streams is unsupported")
|
||||
}
|
||||
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
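For orientation, a hedged sketch of the capture direction this function implements, wired up the same way as the round-trip test near the end of this diff (the destination *tar.Writer and the source path are assumptions):

// +build windows

package capture

import (
    "os"

    "github.com/Microsoft/go-winio"
    "github.com/Microsoft/go-winio/archive/tar"
    "github.com/Microsoft/go-winio/backuptar"
)

// captureOne writes a single file, including its security descriptor and any
// alternate data streams, into tw using the MSWINDOWS.* pax extensions.
func captureOne(tw *tar.Writer, path string) error {
    f, err := os.Open(path)
    if err != nil {
        return err
    }
    defer f.Close()

    fi, err := f.Stat()
    if err != nil {
        return err
    }
    bi, err := winio.GetFileBasicInfo(f) // timestamps and attributes for the tar header
    if err != nil {
        return err
    }

    br := winio.NewBackupFileReader(f, true) // true: include the security descriptor
    defer br.Close()

    return backuptar.WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi)
}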
// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
|
||||
// WriteTarFileFromBackupStream.
|
||||
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
|
||||
name = hdr.Name
|
||||
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
|
||||
size = hdr.Size
|
||||
}
|
||||
fileInfo = &winio.FileBasicInfo{
|
||||
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
|
||||
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
|
||||
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
|
||||
CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
|
||||
}
|
||||
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
|
||||
attr, err := strconv.ParseUint(attrStr, 10, 32)
|
||||
if err != nil {
|
||||
return "", 0, nil, err
|
||||
}
|
||||
fileInfo.FileAttributes = uint32(attr)
|
||||
} else {
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
|
||||
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
|
||||
// tar file that was not processed, or io.EOF if there are no more.
|
||||
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
|
||||
bw := winio.NewBackupStreamWriter(w)
|
||||
var sd []byte
|
||||
var err error
|
||||
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
|
||||
// by this library will have raw binary for the security descriptor.
|
||||
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
|
||||
sd, err = winio.SddlToSecurityDescriptor(sddl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
|
||||
sd, err = base64.StdEncoding.DecodeString(sdraw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if len(sd) != 0 {
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupSecurity,
|
||||
Size: int64(len(sd)),
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(sd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var eas []winio.ExtendedAttribute
|
||||
for k, v := range hdr.Winheaders {
|
||||
if !strings.HasPrefix(k, hdrEaPrefix) {
|
||||
continue
|
||||
}
|
||||
data, err := base64.StdEncoding.DecodeString(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eas = append(eas, winio.ExtendedAttribute{
|
||||
Name: k[len(hdrEaPrefix):],
|
||||
Value: data,
|
||||
})
|
||||
}
|
||||
if len(eas) != 0 {
|
||||
eadata, err := winio.EncodeExtendedAttributes(eas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupEaData,
|
||||
Size: int64(len(eadata)),
|
||||
}
|
||||
err = bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(eadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeSymlink {
|
||||
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
|
||||
rp := winio.ReparsePoint{
|
||||
Target: filepath.FromSlash(hdr.Linkname),
|
||||
IsMountPoint: isMountPoint,
|
||||
}
|
||||
reparse := winio.EncodeReparsePoint(&rp)
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupReparseData,
|
||||
Size: int64(len(reparse)),
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(reparse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupData,
|
||||
Size: hdr.Size,
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = io.Copy(bw, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Copy all the alternate data streams and return the next non-ADS header.
|
||||
for {
|
||||
ahdr, err := t.Next()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
|
||||
return ahdr, nil
|
||||
}
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupAlternateData,
|
||||
Size: ahdr.Size,
|
||||
Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
|
||||
}
|
||||
err = bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = io.Copy(bw, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
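The restore direction mirrors the capture path: a minimal sketch of replaying one tar entry back through BackupWrite (directory creation, link handling, and use of FileInfoFromHeader are omitted, and the destination path handling is an assumption):

// +build windows

package restore

import (
    "io"
    "os"

    "github.com/Microsoft/go-winio"
    "github.com/Microsoft/go-winio/archive/tar"
    "github.com/Microsoft/go-winio/backuptar"
)

// restoreOne writes the entry hdr, plus any alternate data stream entries that
// follow it, into the file at dest and returns the next unprocessed header.
func restoreOne(t *tar.Reader, hdr *tar.Header, dest string) (*tar.Header, error) {
    f, err := os.Create(dest)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    // BackupFileWriter feeds the reconstructed backup stream to BackupWrite;
    // true restores the security descriptor carried in MSWINDOWS.rawsd.
    bw := winio.NewBackupFileWriter(f, true)
    defer bw.Close()

    next, err := backuptar.WriteBackupStreamFromTarFile(bw, t, hdr)
    if err != nil && err != io.EOF {
        return nil, err
    }
    return next, nil // next is nil at the end of the archive
}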
84 vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go generated vendored Normal file
@@ -0,0 +1,84 @@
|
||||
package backuptar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/go-winio/archive/tar"
|
||||
)
|
||||
|
||||
func ensurePresent(t *testing.T, m map[string]string, keys ...string) {
|
||||
for _, k := range keys {
|
||||
if _, ok := m[k]; !ok {
|
||||
t.Error(k, "not present in tar header")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundTrip(t *testing.T) {
|
||||
f, err := ioutil.TempFile("", "tst")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
if _, err = f.Write([]byte("testing 1 2 3\n")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err = f.Seek(0, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bi, err := winio.GetFileBasicInfo(f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
br := winio.NewBackupFileReader(f, true)
|
||||
defer br.Close()
|
||||
|
||||
var buf bytes.Buffer
|
||||
tw := tar.NewWriter(&buf)
|
||||
|
||||
err = WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tr := tar.NewReader(&buf)
|
||||
hdr, err := tr.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
name, size, bi2, err := FileInfoFromHeader(hdr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if name != filepath.ToSlash(f.Name()) {
|
||||
t.Errorf("got name %s, expected %s", name, filepath.ToSlash(f.Name()))
|
||||
}
|
||||
|
||||
if size != fi.Size() {
|
||||
t.Errorf("got size %d, expected %d", size, fi.Size())
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(*bi, *bi2) {
|
||||
t.Errorf("got %#v, expected %#v", *bi, *bi2)
|
||||
}
|
||||
|
||||
ensurePresent(t, hdr.Winheaders, "fileattr", "rawsd")
|
||||
}
|
||||
137 vendor/github.com/Microsoft/go-winio/ea.go generated vendored Normal file
@@ -0,0 +1,137 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
type fileFullEaInformation struct {
|
||||
NextEntryOffset uint32
|
||||
Flags uint8
|
||||
NameLength uint8
|
||||
ValueLength uint16
|
||||
}
|
||||
|
||||
var (
|
||||
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
|
||||
|
||||
errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
|
||||
errEaNameTooLarge = errors.New("extended attribute name too large")
|
||||
errEaValueTooLarge = errors.New("extended attribute value too large")
|
||||
)
|
||||
|
||||
// ExtendedAttribute represents a single Windows EA.
|
||||
type ExtendedAttribute struct {
|
||||
Name string
|
||||
Value []byte
|
||||
Flags uint8
|
||||
}
|
||||
|
||||
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
|
||||
var info fileFullEaInformation
|
||||
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
nameOffset := fileFullEaInformationSize
|
||||
nameLen := int(info.NameLength)
|
||||
valueOffset := nameOffset + int(info.NameLength) + 1
|
||||
valueLen := int(info.ValueLength)
|
||||
nextOffset := int(info.NextEntryOffset)
|
||||
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
ea.Name = string(b[nameOffset : nameOffset+nameLen])
|
||||
ea.Value = b[valueOffset : valueOffset+valueLen]
|
||||
ea.Flags = info.Flags
|
||||
if info.NextEntryOffset != 0 {
|
||||
nb = b[info.NextEntryOffset:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
|
||||
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
|
||||
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
|
||||
for len(b) != 0 {
|
||||
ea, nb, err := parseEa(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
eas = append(eas, ea)
|
||||
b = nb
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
|
||||
if int(uint8(len(ea.Name))) != len(ea.Name) {
|
||||
return errEaNameTooLarge
|
||||
}
|
||||
if int(uint16(len(ea.Value))) != len(ea.Value) {
|
||||
return errEaValueTooLarge
|
||||
}
|
||||
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
|
||||
withPadding := (entrySize + 3) &^ 3
|
||||
nextOffset := uint32(0)
|
||||
if !last {
|
||||
nextOffset = withPadding
|
||||
}
|
||||
info := fileFullEaInformation{
|
||||
NextEntryOffset: nextOffset,
|
||||
Flags: ea.Flags,
|
||||
NameLength: uint8(len(ea.Name)),
|
||||
ValueLength: uint16(len(ea.Value)),
|
||||
}
|
||||
|
||||
err := binary.Write(buf, binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte(ea.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = buf.WriteByte(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write(ea.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
|
||||
// buffer for use with BackupWrite, ZwSetEaFile, etc.
|
||||
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
for i := range eas {
|
||||
last := false
|
||||
if i == len(eas)-1 {
|
||||
last = true
|
||||
}
|
||||
|
||||
err := writeEa(&buf, &eas[i], last)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
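The two exported helpers above are the whole public surface for EAs in this file. A minimal round-trip sketch using them follows; the attribute names and values are made up for illustration, and in practice the resulting buffer is only meaningful on Windows (BackupWrite, ZwSetEaFile, etc.):

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Encode two EAs into a single FILE_FULL_EA_INFORMATION buffer.
	buf, err := winio.EncodeExtendedAttributes([]winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello")},
		{Name: "user.owner", Value: []byte("builder")},
	})
	if err != nil {
		panic(err)
	}

	// Decode it back; names, values and flags survive the round trip.
	eas, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		panic(err)
	}
	for _, ea := range eas {
		fmt.Printf("%s=%q\n", ea.Name, ea.Value)
	}
}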
89 vendor/github.com/Microsoft/go-winio/ea_test.go generated vendored Normal file
@@ -0,0 +1,89 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"syscall"
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
testEas = []ExtendedAttribute{
|
||||
{Name: "foo", Value: []byte("bar")},
|
||||
{Name: "fizz", Value: []byte("buzz")},
|
||||
}
|
||||
|
||||
testEasEncoded = []byte{16, 0, 0, 0, 0, 3, 3, 0, 102, 111, 111, 0, 98, 97, 114, 0, 0, 0, 0, 0, 0, 4, 4, 0, 102, 105, 122, 122, 0, 98, 117, 122, 122, 0, 0, 0}
|
||||
testEasNotPadded = testEasEncoded[0 : len(testEasEncoded)-3]
|
||||
testEasTruncated = testEasEncoded[0:20]
|
||||
)
|
||||
|
||||
func Test_RoundTripEas(t *testing.T) {
|
||||
b, err := EncodeExtendedAttributes(testEas)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(testEasEncoded, b) {
|
||||
t.Fatalf("encoded mismatch %v %v", testEasEncoded, b)
|
||||
}
|
||||
eas, err := DecodeExtendedAttributes(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(testEas, eas) {
|
||||
t.Fatalf("mismatch %+v %+v", testEas, eas)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_EasDontNeedPaddingAtEnd(t *testing.T) {
|
||||
eas, err := DecodeExtendedAttributes(testEasNotPadded)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(testEas, eas) {
|
||||
t.Fatalf("mismatch %+v %+v", testEas, eas)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_TruncatedEasFailCorrectly(t *testing.T) {
|
||||
_, err := DecodeExtendedAttributes(testEasTruncated)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_NilEasEncodeAndDecodeAsNil(t *testing.T) {
|
||||
b, err := EncodeExtendedAttributes(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(b) != 0 {
|
||||
t.Fatal("expected empty")
|
||||
}
|
||||
eas, err := DecodeExtendedAttributes(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(eas) != 0 {
|
||||
t.Fatal("expected empty")
|
||||
}
|
||||
}
|
||||
|
||||
// Test_SetFileEa makes sure that the test buffer is actually parsable by NtSetEaFile.
|
||||
func Test_SetFileEa(t *testing.T) {
|
||||
f, err := ioutil.TempFile("", "winio")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
defer f.Close()
|
||||
ntdll := syscall.MustLoadDLL("ntdll.dll")
|
||||
ntSetEaFile := ntdll.MustFindProc("NtSetEaFile")
|
||||
var iosb [2]uintptr
|
||||
r, _, _ := ntSetEaFile.Call(f.Fd(), uintptr(unsafe.Pointer(&iosb[0])), uintptr(unsafe.Pointer(&testEasEncoded[0])), uintptr(len(testEasEncoded)))
|
||||
if r != 0 {
|
||||
t.Fatalf("NtSetEaFile failed with %08x", r)
|
||||
}
|
||||
}
|
||||
307 vendor/github.com/Microsoft/go-winio/file.go generated vendored Normal file
@@ -0,0 +1,307 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
|
||||
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
|
||||
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
|
||||
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
|
||||
|
||||
type atomicBool int32
|
||||
|
||||
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
|
||||
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
|
||||
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
|
||||
func (b *atomicBool) swap(new bool) bool {
|
||||
var newInt int32
|
||||
if new {
|
||||
newInt = 1
|
||||
}
|
||||
return atomic.SwapInt32((*int32)(b), newInt) == 1
|
||||
}
|
||||
|
||||
const (
|
||||
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
|
||||
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
|
||||
)
|
||||
|
||||
var (
|
||||
ErrFileClosed = errors.New("file has already been closed")
|
||||
ErrTimeout = &timeoutError{}
|
||||
)
|
||||
|
||||
type timeoutError struct{}
|
||||
|
||||
func (e *timeoutError) Error() string { return "i/o timeout" }
|
||||
func (e *timeoutError) Timeout() bool { return true }
|
||||
func (e *timeoutError) Temporary() bool { return true }
|
||||
|
||||
type timeoutChan chan struct{}
|
||||
|
||||
var ioInitOnce sync.Once
|
||||
var ioCompletionPort syscall.Handle
|
||||
|
||||
// ioResult contains the result of an asynchronous IO operation
|
||||
type ioResult struct {
|
||||
bytes uint32
|
||||
err error
|
||||
}
|
||||
|
||||
// ioOperation represents an outstanding asynchronous Win32 IO
|
||||
type ioOperation struct {
|
||||
o syscall.Overlapped
|
||||
ch chan ioResult
|
||||
}
|
||||
|
||||
func initIo() {
|
||||
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ioCompletionPort = h
|
||||
go ioCompletionProcessor(h)
|
||||
}
|
||||
|
||||
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
|
||||
// It takes ownership of this handle and will close it if it is garbage collected.
|
||||
type win32File struct {
|
||||
handle syscall.Handle
|
||||
wg sync.WaitGroup
|
||||
wgLock sync.RWMutex
|
||||
closing atomicBool
|
||||
readDeadline deadlineHandler
|
||||
writeDeadline deadlineHandler
|
||||
}
|
||||
|
||||
type deadlineHandler struct {
|
||||
setLock sync.Mutex
|
||||
channel timeoutChan
|
||||
channelLock sync.RWMutex
|
||||
timer *time.Timer
|
||||
timedout atomicBool
|
||||
}
|
||||
|
||||
// makeWin32File makes a new win32File from an existing file handle
|
||||
func makeWin32File(h syscall.Handle) (*win32File, error) {
|
||||
f := &win32File{handle: h}
|
||||
ioInitOnce.Do(initIo)
|
||||
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.readDeadline.channel = make(timeoutChan)
|
||||
f.writeDeadline.channel = make(timeoutChan)
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
|
||||
return makeWin32File(h)
|
||||
}
|
||||
|
||||
// closeHandle closes the resources associated with a Win32 handle
|
||||
func (f *win32File) closeHandle() {
|
||||
f.wgLock.Lock()
|
||||
// Atomically set that we are closing, releasing the resources only once.
|
||||
if !f.closing.swap(true) {
|
||||
f.wgLock.Unlock()
|
||||
// cancel all IO and wait for it to complete
|
||||
cancelIoEx(f.handle, nil)
|
||||
f.wg.Wait()
|
||||
// at this point, no new IO can start
|
||||
syscall.Close(f.handle)
|
||||
f.handle = 0
|
||||
} else {
|
||||
f.wgLock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes a win32File.
|
||||
func (f *win32File) Close() error {
|
||||
f.closeHandle()
|
||||
return nil
|
||||
}
|
||||
|
||||
// prepareIo prepares for a new IO operation.
|
||||
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
|
||||
func (f *win32File) prepareIo() (*ioOperation, error) {
|
||||
f.wgLock.RLock()
|
||||
if f.closing.isSet() {
|
||||
f.wgLock.RUnlock()
|
||||
return nil, ErrFileClosed
|
||||
}
|
||||
f.wg.Add(1)
|
||||
f.wgLock.RUnlock()
|
||||
c := &ioOperation{}
|
||||
c.ch = make(chan ioResult)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// ioCompletionProcessor processes completed async IOs forever
|
||||
func ioCompletionProcessor(h syscall.Handle) {
|
||||
for {
|
||||
var bytes uint32
|
||||
var key uintptr
|
||||
var op *ioOperation
|
||||
err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
|
||||
if op == nil {
|
||||
panic(err)
|
||||
}
|
||||
op.ch <- ioResult{bytes, err}
|
||||
}
|
||||
}
|
||||
|
||||
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
|
||||
// the operation has actually completed.
|
||||
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
|
||||
if err != syscall.ERROR_IO_PENDING {
|
||||
return int(bytes), err
|
||||
}
|
||||
|
||||
if f.closing.isSet() {
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
}
|
||||
|
||||
var timeout timeoutChan
|
||||
if d != nil {
|
||||
d.channelLock.Lock()
|
||||
timeout = d.channel
|
||||
d.channelLock.Unlock()
|
||||
}
|
||||
|
||||
var r ioResult
|
||||
select {
|
||||
case r = <-c.ch:
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
if f.closing.isSet() {
|
||||
err = ErrFileClosed
|
||||
}
|
||||
}
|
||||
case <-timeout:
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
r = <-c.ch
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
err = ErrTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// runtime.KeepAlive is needed, as c is passed via native
|
||||
// code to ioCompletionProcessor, c must remain alive
|
||||
// until the channel read is complete.
|
||||
runtime.KeepAlive(c)
|
||||
return int(r.bytes), err
|
||||
}
|
||||
|
||||
// Read reads from a file handle.
|
||||
func (f *win32File) Read(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.wg.Done()
|
||||
|
||||
if f.readDeadline.timedout.isSet() {
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
|
||||
// Handle EOF conditions.
|
||||
if err == nil && n == 0 && len(b) != 0 {
|
||||
return 0, io.EOF
|
||||
} else if err == syscall.ERROR_BROKEN_PIPE {
|
||||
return 0, io.EOF
|
||||
} else {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes to a file handle.
|
||||
func (f *win32File) Write(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.wg.Done()
|
||||
|
||||
if f.writeDeadline.timedout.isSet() {
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (f *win32File) SetReadDeadline(deadline time.Time) error {
|
||||
return f.readDeadline.set(deadline)
|
||||
}
|
||||
|
||||
func (f *win32File) SetWriteDeadline(deadline time.Time) error {
|
||||
return f.writeDeadline.set(deadline)
|
||||
}
|
||||
|
||||
func (f *win32File) Flush() error {
|
||||
return syscall.FlushFileBuffers(f.handle)
|
||||
}
|
||||
|
||||
func (d *deadlineHandler) set(deadline time.Time) error {
|
||||
d.setLock.Lock()
|
||||
defer d.setLock.Unlock()
|
||||
|
||||
if d.timer != nil {
|
||||
if !d.timer.Stop() {
|
||||
<-d.channel
|
||||
}
|
||||
d.timer = nil
|
||||
}
|
||||
d.timedout.setFalse()
|
||||
|
||||
select {
|
||||
case <-d.channel:
|
||||
d.channelLock.Lock()
|
||||
d.channel = make(chan struct{})
|
||||
d.channelLock.Unlock()
|
||||
default:
|
||||
}
|
||||
|
||||
if deadline.IsZero() {
|
||||
return nil
|
||||
}
|
||||
|
||||
timeoutIO := func() {
|
||||
d.timedout.setTrue()
|
||||
close(d.channel)
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
duration := deadline.Sub(now)
|
||||
if deadline.After(now) {
|
||||
// Deadline is in the future, set a timer to wait
|
||||
d.timer = time.AfterFunc(duration, timeoutIO)
|
||||
} else {
|
||||
// Deadline is in the past. Cancel all pending IO now.
|
||||
timeoutIO()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
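win32File above routes every read and write through a shared IO completion port, so waiting happens on a Go channel rather than inside a blocking syscall. A sketch of wrapping an existing overlapped handle with the exported MakeOpenFile; wrapHandle is an illustrative helper (not part of the package) and the handle is assumed to have been opened with FILE_FLAG_OVERLAPPED:

package main

import (
	"syscall"

	"github.com/Microsoft/go-winio"
)

// wrapHandle hands ownership of an overlapped Win32 handle to the
// completion-port machinery shown above, then performs one read and one write.
func wrapHandle(h syscall.Handle) error {
	f, err := winio.MakeOpenFile(h)
	if err != nil {
		return err
	}
	defer f.Close()

	buf := make([]byte, 512)
	if _, err := f.Read(buf); err != nil { // blocks the goroutine, not an OS thread
		return err
	}
	_, err = f.Write([]byte("ack"))
	return err
}

func main() {
	// Obtaining a real overlapped handle (e.g. via CreateFile) is out of scope here.
}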
61 vendor/github.com/Microsoft/go-winio/fileinfo.go generated vendored Normal file
@@ -0,0 +1,61 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
|
||||
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
|
||||
|
||||
const (
|
||||
fileBasicInfo = 0
|
||||
fileIDInfo = 0x12
|
||||
)
|
||||
|
||||
// FileBasicInfo contains file access time and file attributes information.
|
||||
type FileBasicInfo struct {
|
||||
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
|
||||
FileAttributes uint32
|
||||
pad uint32 // padding
|
||||
}
|
||||
|
||||
// GetFileBasicInfo retrieves times and attributes for a file.
|
||||
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
||||
bi := &FileBasicInfo{}
|
||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return bi, nil
|
||||
}
|
||||
|
||||
// SetFileBasicInfo sets times and attributes for a file.
|
||||
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
|
||||
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
|
||||
// unique on a system.
|
||||
type FileIDInfo struct {
|
||||
VolumeSerialNumber uint64
|
||||
FileID [16]byte
|
||||
}
|
||||
|
||||
// GetFileID retrieves the unique (volume, file ID) pair for a file.
|
||||
func GetFileID(f *os.File) (*FileIDInfo, error) {
|
||||
fileID := &FileIDInfo{}
|
||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return fileID, nil
|
||||
}
|
||||
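GetFileBasicInfo and GetFileID above are thin wrappers over GetFileInformationByHandleEx. A short usage sketch (the path is only an example):

package main

import (
	"fmt"
	"os"

	"github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\Windows\notepad.exe`) // example path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Times and attributes for the open handle.
	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("attributes: 0x%x\n", bi.FileAttributes)

	// (volume serial, file ID) pair, unique per system.
	id, err := winio.GetFileID(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("volume %d, file ID %x\n", id.VolumeSerialNumber, id.FileID)
}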
15 vendor/github.com/Microsoft/go-winio/internal/etw/etw.go generated vendored Normal file
@@ -0,0 +1,15 @@
// Package etw provides support for TraceLogging-based ETW (Event Tracing
// for Windows). TraceLogging is a format of ETW events that are self-describing
// (the event contains information on its own schema). This allows them to be
// decoded without needing a separate manifest with event information. The
// implementation here is based on the information found in
// TraceLoggingProvider.h in the Windows SDK, which implements TraceLogging as a
// set of C macros.
package etw

//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go etw.go

//sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister
//sys eventUnregister(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister
//sys eventWriteTransfer(providerHandle providerHandle, descriptor *EventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer
//sys eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation
65 vendor/github.com/Microsoft/go-winio/internal/etw/eventdata.go generated vendored Normal file
@@ -0,0 +1,65 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// EventData maintains a buffer which builds up the data for an ETW event. It
|
||||
// needs to be paired with EventMetadata which describes the event.
|
||||
type EventData struct {
|
||||
buffer bytes.Buffer
|
||||
}
|
||||
|
||||
// Bytes returns the raw binary data containing the event data. The returned
|
||||
// value is not copied from the internal buffer, so it can be mutated by the
|
||||
// EventData object after it is returned.
|
||||
func (ed *EventData) Bytes() []byte {
|
||||
return ed.buffer.Bytes()
|
||||
}
|
||||
|
||||
// WriteString appends a string, including the null terminator, to the buffer.
|
||||
func (ed *EventData) WriteString(data string) {
|
||||
ed.buffer.WriteString(data)
|
||||
ed.buffer.WriteByte(0)
|
||||
}
|
||||
|
||||
// WriteInt8 appends a int8 to the buffer.
|
||||
func (ed *EventData) WriteInt8(value int8) {
|
||||
ed.buffer.WriteByte(uint8(value))
|
||||
}
|
||||
|
||||
// WriteInt16 appends a int16 to the buffer.
|
||||
func (ed *EventData) WriteInt16(value int16) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// WriteInt32 appends a int32 to the buffer.
|
||||
func (ed *EventData) WriteInt32(value int32) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// WriteInt64 appends a int64 to the buffer.
|
||||
func (ed *EventData) WriteInt64(value int64) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// WriteUint8 appends a uint8 to the buffer.
|
||||
func (ed *EventData) WriteUint8(value uint8) {
|
||||
ed.buffer.WriteByte(value)
|
||||
}
|
||||
|
||||
// WriteUint16 appends a uint16 to the buffer.
|
||||
func (ed *EventData) WriteUint16(value uint16) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// WriteUint32 appends a uint32 to the buffer.
|
||||
func (ed *EventData) WriteUint32(value uint32) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// WriteUint64 appends a uint64 to the buffer.
|
||||
func (ed *EventData) WriteUint64(value uint64) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
29 vendor/github.com/Microsoft/go-winio/internal/etw/eventdatadescriptor.go generated vendored Normal file
@@ -0,0 +1,29 @@
package etw

import (
	"unsafe"
)

type eventDataDescriptorType uint8

const (
	eventDataDescriptorTypeUserData eventDataDescriptorType = iota
	eventDataDescriptorTypeEventMetadata
	eventDataDescriptorTypeProviderMetadata
)

type eventDataDescriptor struct {
	ptr       ptr64
	size      uint32
	dataType  eventDataDescriptorType
	reserved1 uint8
	reserved2 uint16
}

func newEventDataDescriptor(dataType eventDataDescriptorType, buffer []byte) eventDataDescriptor {
	return eventDataDescriptor{
		ptr:      ptr64{ptr: unsafe.Pointer(&buffer[0])},
		size:     uint32(len(buffer)),
		dataType: dataType,
	}
}
67 vendor/github.com/Microsoft/go-winio/internal/etw/eventdescriptor.go generated vendored Normal file
@@ -0,0 +1,67 @@
|
||||
package etw
|
||||
|
||||
// Channel represents the ETW logging channel that is used. It can be used by
|
||||
// event consumers to give an event special treatment.
|
||||
type Channel uint8
|
||||
|
||||
const (
|
||||
// ChannelTraceLogging is the default channel for TraceLogging events. It is
|
||||
// not required to be used for TraceLogging, but will prevent decoding
|
||||
// issues for these events on older operating systems.
|
||||
ChannelTraceLogging Channel = 11
|
||||
)
|
||||
|
||||
// Level represents the ETW logging level. There are several predefined levels
|
||||
// that are commonly used, but technically anything from 0-255 is allowed.
|
||||
// Lower levels indicate more important events, and 0 indicates an event that
|
||||
// will always be collected.
|
||||
type Level uint8
|
||||
|
||||
// Predefined ETW log levels.
|
||||
const (
|
||||
LevelAlways Level = iota
|
||||
LevelCritical
|
||||
LevelError
|
||||
LevelWarning
|
||||
LevelInfo
|
||||
LevelVerbose
|
||||
)
|
||||
|
||||
// EventDescriptor represents various metadata for an ETW event.
|
||||
type EventDescriptor struct {
|
||||
id uint16
|
||||
version uint8
|
||||
Channel Channel
|
||||
Level Level
|
||||
Opcode uint8
|
||||
Task uint16
|
||||
Keyword uint64
|
||||
}
|
||||
|
||||
// NewEventDescriptor returns an EventDescriptor initialized for use with
|
||||
// TraceLogging.
|
||||
func NewEventDescriptor() *EventDescriptor {
|
||||
// Standard TraceLogging events default to the TraceLogging channel, and
|
||||
// verbose level.
|
||||
return &EventDescriptor{
|
||||
Channel: ChannelTraceLogging,
|
||||
Level: LevelVerbose,
|
||||
}
|
||||
}
|
||||
|
||||
// Identity returns the identity of the event. If the identity is not 0, it
|
||||
// should uniquely identify the other event metadata (contained in
|
||||
// EventDescriptor, and field metadata). Only the lower 24 bits of this value
|
||||
// are relevant.
|
||||
func (ed *EventDescriptor) Identity() uint32 {
|
||||
return (uint32(ed.version) << 16) | uint32(ed.id)
|
||||
}
|
||||
|
||||
// SetIdentity sets the identity of the event. If the identity is not 0, it
|
||||
// should uniquely identify the other event metadata (contained in
|
||||
// EventDescriptor, and field metadata). Only the lower 24 bits of this value
|
||||
// are relevant.
|
||||
func (ed *EventDescriptor) SetIdentity(identity uint32) {
|
||||
ed.id = uint16(identity)
|
||||
ed.version = uint8(identity >> 16)
|
||||
}
|
||||
177 vendor/github.com/Microsoft/go-winio/internal/etw/eventmetadata.go generated vendored Normal file
@@ -0,0 +1,177 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// InType indicates the type of data contained in the ETW event.
|
||||
type InType byte
|
||||
|
||||
// Various InType definitions for TraceLogging. These must match the definitions
|
||||
// found in TraceLoggingProvider.h in the Windows SDK.
|
||||
const (
|
||||
InTypeNull InType = iota
|
||||
InTypeUnicodeString
|
||||
InTypeANSIString
|
||||
InTypeInt8
|
||||
InTypeUint8
|
||||
InTypeInt16
|
||||
InTypeUint16
|
||||
InTypeInt32
|
||||
InTypeUint32
|
||||
InTypeInt64
|
||||
InTypeUint64
|
||||
InTypeFloat
|
||||
InTypeDouble
|
||||
InTypeBool32
|
||||
InTypeBinary
|
||||
InTypeGUID
|
||||
InTypePointerUnsupported
|
||||
InTypeFileTime
|
||||
InTypeSystemTime
|
||||
InTypeSID
|
||||
InTypeHexInt32
|
||||
InTypeHexInt64
|
||||
InTypeCountedString
|
||||
InTypeCountedANSIString
|
||||
InTypeStruct
|
||||
InTypeCountedBinary
|
||||
InTypeCountedArray InType = 32
|
||||
InTypeArray InType = 64
|
||||
)
|
||||
|
||||
// OutType specifies a hint to the event decoder for how the value should be
|
||||
// formatted.
|
||||
type OutType byte
|
||||
|
||||
// Various OutType definitions for TraceLogging. These must match the
|
||||
// definitions found in TraceLoggingProvider.h in the Windows SDK.
|
||||
const (
|
||||
// OutTypeDefault indicates that the default formatting for the InType will
|
||||
// be used by the event decoder.
|
||||
OutTypeDefault OutType = iota
|
||||
OutTypeNoPrint
|
||||
OutTypeString
|
||||
OutTypeBoolean
|
||||
OutTypeHex
|
||||
OutTypePID
|
||||
OutTypeTID
|
||||
OutTypePort
|
||||
OutTypeIPv4
|
||||
OutTypeIPv6
|
||||
OutTypeSocketAddress
|
||||
OutTypeXML
|
||||
OutTypeJSON
|
||||
OutTypeWin32Error
|
||||
OutTypeNTStatus
|
||||
OutTypeHResult
|
||||
OutTypeFileTime
|
||||
OutTypeSigned
|
||||
OutTypeUnsigned
|
||||
OutTypeUTF8 OutType = 35
|
||||
OutTypePKCS7WithTypeInfo OutType = 36
|
||||
OutTypeCodePointer OutType = 37
|
||||
OutTypeDateTimeUTC OutType = 38
|
||||
)
|
||||
|
||||
// EventMetadata maintains a buffer which builds up the metadata for an ETW
|
||||
// event. It needs to be paired with EventData which describes the event.
|
||||
type EventMetadata struct {
|
||||
buffer bytes.Buffer
|
||||
}
|
||||
|
||||
// Bytes returns the raw binary data containing the event metadata. Before being
|
||||
// returned, the current size of the buffer is written to the start of the
|
||||
// buffer. The returned value is not copied from the internal buffer, so it can
|
||||
// be mutated by the EventMetadata object after it is returned.
|
||||
func (em *EventMetadata) Bytes() []byte {
|
||||
// Finalize the event metadata buffer by filling in the buffer length at the
|
||||
// beginning.
|
||||
binary.LittleEndian.PutUint16(em.buffer.Bytes(), uint16(em.buffer.Len()))
|
||||
return em.buffer.Bytes()
|
||||
}
|
||||
|
||||
// WriteEventHeader writes the metadata for the start of an event to the buffer.
|
||||
// This specifies the event name and tags.
|
||||
func (em *EventMetadata) WriteEventHeader(name string, tags uint32) {
|
||||
binary.Write(&em.buffer, binary.LittleEndian, uint16(0)) // Length placeholder
|
||||
em.writeTags(tags)
|
||||
em.buffer.WriteString(name)
|
||||
em.buffer.WriteByte(0) // Null terminator for name
|
||||
}
|
||||
|
||||
func (em *EventMetadata) writeField(name string, inType InType, outType OutType, tags uint32, arrSize uint16) {
|
||||
em.buffer.WriteString(name)
|
||||
em.buffer.WriteByte(0) // Null terminator for name
|
||||
|
||||
if outType == OutTypeDefault && tags == 0 {
|
||||
em.buffer.WriteByte(byte(inType))
|
||||
} else {
|
||||
em.buffer.WriteByte(byte(inType | 128))
|
||||
if tags == 0 {
|
||||
em.buffer.WriteByte(byte(outType))
|
||||
} else {
|
||||
em.buffer.WriteByte(byte(outType | 128))
|
||||
em.writeTags(tags)
|
||||
}
|
||||
}
|
||||
|
||||
if arrSize != 0 {
|
||||
binary.Write(&em.buffer, binary.LittleEndian, arrSize)
|
||||
}
|
||||
}
|
||||
|
||||
// writeTags writes out the tags value to the event metadata. Tags is a 28-bit
|
||||
// value, interpreted as bit flags, which are only relevant to the event
|
||||
// consumer. The event consumer may choose to attribute special meaning to tags
|
||||
// (e.g. 0x4 could mean the field contains PII). Tags are written as a series of
|
||||
// bytes, each containing 7 bits of tag value, with the high bit set if there is
|
||||
// more tag data in the following byte. This allows for a more compact
|
||||
// representation when not all of the tag bits are needed.
|
||||
func (em *EventMetadata) writeTags(tags uint32) {
|
||||
// Only use the top 28 bits of the tags value.
|
||||
tags &= 0xfffffff
|
||||
|
||||
for {
|
||||
// Tags are written with the most significant bits (e.g. 21-27) first.
|
||||
val := tags >> 21
|
||||
|
||||
if tags&0x1fffff == 0 {
|
||||
// If there is no more data to write after this, write this value
|
||||
// without the high bit set, and return.
|
||||
em.buffer.WriteByte(byte(val & 0x7f))
|
||||
return
|
||||
}
|
||||
|
||||
em.buffer.WriteByte(byte(val | 0x80))
|
||||
|
||||
tags <<= 7
|
||||
}
|
||||
}
|
||||
|
||||
// WriteField writes the metadata for a simple field to the buffer.
|
||||
func (em *EventMetadata) WriteField(name string, inType InType, outType OutType, tags uint32) {
|
||||
em.writeField(name, inType, outType, tags, 0)
|
||||
}
|
||||
|
||||
// WriteArray writes the metadata for an array field to the buffer. The number
|
||||
// of elements in the array must be written as a uint16 in the event data,
|
||||
// immediately preceding the event data.
|
||||
func (em *EventMetadata) WriteArray(name string, inType InType, outType OutType, tags uint32) {
|
||||
em.writeField(name, inType|InTypeArray, outType, tags, 0)
|
||||
}
|
||||
|
||||
// WriteCountedArray writes the metadata for an array field to the buffer. The
|
||||
// size of a counted array is fixed, and the size is written into the metadata
|
||||
// directly.
|
||||
func (em *EventMetadata) WriteCountedArray(name string, count uint16, inType InType, outType OutType, tags uint32) {
|
||||
em.writeField(name, inType|InTypeCountedArray, outType, tags, count)
|
||||
}
|
||||
|
||||
// WriteStruct writes the metadata for a nested struct to the buffer. The struct
|
||||
// contains the next N fields in the metadata, where N is specified by the
|
||||
// fieldCount argument.
|
||||
func (em *EventMetadata) WriteStruct(name string, fieldCount uint8, tags uint32) {
|
||||
em.writeField(name, InTypeStruct, OutType(fieldCount), tags, 0)
|
||||
}
|
||||
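The writeTags comment above describes a varint-like scheme: the 28-bit tag value is emitted most-significant-7-bits first, with the high bit of each byte flagging that more bytes follow. A standalone sketch of the same byte layout (encodeTags is illustrative only, not part of the package):

package main

import "fmt"

// encodeTags mirrors the layout written by EventMetadata.writeTags: up to four
// bytes, each carrying 7 tag bits, continuation bit set while more bytes follow.
func encodeTags(tags uint32) []byte {
	tags &= 0xfffffff // only the low 28 bits are meaningful
	var out []byte
	for {
		val := byte(tags >> 21) // most significant 7 bits first
		if tags&0x1fffff == 0 {
			return append(out, val&0x7f) // last byte: continuation bit clear
		}
		out = append(out, val|0x80) // continuation bit set
		tags <<= 7
	}
}

func main() {
	fmt.Printf("% x\n", encodeTags(0x8000000)) // only high bits set -> one byte: 40
	fmt.Printf("% x\n", encodeTags(0x0000001)) // low bit set -> four bytes: 80 80 80 01
}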
63 vendor/github.com/Microsoft/go-winio/internal/etw/eventopt.go generated vendored Normal file
@@ -0,0 +1,63 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
type eventOptions struct {
|
||||
descriptor *EventDescriptor
|
||||
activityID *windows.GUID
|
||||
relatedActivityID *windows.GUID
|
||||
tags uint32
|
||||
}
|
||||
|
||||
// EventOpt defines the option function type that can be passed to
|
||||
// Provider.WriteEvent to specify general event options, such as level and
|
||||
// keyword.
|
||||
type EventOpt func(options *eventOptions)
|
||||
|
||||
// WithEventOpts returns the variadic arguments as a single slice.
|
||||
func WithEventOpts(opts ...EventOpt) []EventOpt {
|
||||
return opts
|
||||
}
|
||||
|
||||
// WithLevel specifies the level of the event to be written.
|
||||
func WithLevel(level Level) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.descriptor.Level = level
|
||||
}
|
||||
}
|
||||
|
||||
// WithKeyword specifies the keywords of the event to be written. Multiple uses
|
||||
// of this option are OR'd together.
|
||||
func WithKeyword(keyword uint64) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.descriptor.Keyword |= keyword
|
||||
}
|
||||
}
|
||||
|
||||
func WithChannel(channel Channel) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.descriptor.Channel = channel
|
||||
}
|
||||
}
|
||||
|
||||
// WithTags specifies the tags of the event to be written. Tags is a 28-bit
|
||||
// value (top 4 bits are ignored) which are interpreted by the event consumer.
|
||||
func WithTags(newTags uint32) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.tags |= newTags
|
||||
}
|
||||
}
|
||||
|
||||
func WithActivityID(activityID *windows.GUID) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.activityID = activityID
|
||||
}
|
||||
}
|
||||
|
||||
func WithRelatedActivityID(activityID *windows.GUID) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.relatedActivityID = activityID
|
||||
}
|
||||
}
|
||||
379 vendor/github.com/Microsoft/go-winio/internal/etw/fieldopt.go generated vendored Normal file
@@ -0,0 +1,379 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"math"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// FieldOpt defines the option function type that can be passed to
|
||||
// Provider.WriteEvent to add fields to the event.
|
||||
type FieldOpt func(em *EventMetadata, ed *EventData)
|
||||
|
||||
// WithFields returns the variadic arguments as a single slice.
|
||||
func WithFields(opts ...FieldOpt) []FieldOpt {
|
||||
return opts
|
||||
}
|
||||
|
||||
// BoolField adds a single bool field to the event.
|
||||
func BoolField(name string, value bool) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeUint8, OutTypeBoolean, 0)
|
||||
bool8 := uint8(0)
|
||||
if value {
|
||||
bool8 = uint8(1)
|
||||
}
|
||||
ed.WriteUint8(bool8)
|
||||
}
|
||||
}
|
||||
|
||||
// BoolArray adds an array of bool to the event.
|
||||
func BoolArray(name string, values []bool) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeUint8, OutTypeBoolean, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
bool8 := uint8(0)
|
||||
if v {
|
||||
bool8 = uint8(1)
|
||||
}
|
||||
ed.WriteUint8(bool8)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StringField adds a single string field to the event.
|
||||
func StringField(name string, value string) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeANSIString, OutTypeUTF8, 0)
|
||||
ed.WriteString(value)
|
||||
}
|
||||
}
|
||||
|
||||
// StringArray adds an array of string to the event.
|
||||
func StringArray(name string, values []string) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeANSIString, OutTypeUTF8, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteString(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IntField adds a single int field to the event.
|
||||
func IntField(name string, value int) FieldOpt {
|
||||
switch unsafe.Sizeof(value) {
|
||||
case 4:
|
||||
return Int32Field(name, int32(value))
|
||||
case 8:
|
||||
return Int64Field(name, int64(value))
|
||||
default:
|
||||
panic("Unsupported int size")
|
||||
}
|
||||
}
|
||||
|
||||
// IntArray adds an array of int to the event.
|
||||
func IntArray(name string, values []int) FieldOpt {
|
||||
inType := InTypeNull
|
||||
var writeItem func(*EventData, int)
|
||||
switch unsafe.Sizeof(values[0]) {
|
||||
case 4:
|
||||
inType = InTypeInt32
|
||||
writeItem = func(ed *EventData, item int) { ed.WriteInt32(int32(item)) }
|
||||
case 8:
|
||||
inType = InTypeInt64
|
||||
writeItem = func(ed *EventData, item int) { ed.WriteInt64(int64(item)) }
|
||||
default:
|
||||
panic("Unsupported int size")
|
||||
}
|
||||
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, inType, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
writeItem(ed, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Int8Field adds a single int8 field to the event.
|
||||
func Int8Field(name string, value int8) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeInt8, OutTypeDefault, 0)
|
||||
ed.WriteInt8(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Int8Array adds an array of int8 to the event.
|
||||
func Int8Array(name string, values []int8) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeInt8, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteInt8(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Int16Field adds a single int16 field to the event.
|
||||
func Int16Field(name string, value int16) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeInt16, OutTypeDefault, 0)
|
||||
ed.WriteInt16(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Int16Array adds an array of int16 to the event.
|
||||
func Int16Array(name string, values []int16) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeInt16, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteInt16(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Int32Field adds a single int32 field to the event.
|
||||
func Int32Field(name string, value int32) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeInt32, OutTypeDefault, 0)
|
||||
ed.WriteInt32(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Int32Array adds an array of int32 to the event.
|
||||
func Int32Array(name string, values []int32) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeInt32, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteInt32(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Int64Field adds a single int64 field to the event.
|
||||
func Int64Field(name string, value int64) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeInt64, OutTypeDefault, 0)
|
||||
ed.WriteInt64(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Int64Array adds an array of int64 to the event.
|
||||
func Int64Array(name string, values []int64) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeInt64, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteInt64(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UintField adds a single uint field to the event.
|
||||
func UintField(name string, value uint) FieldOpt {
|
||||
switch unsafe.Sizeof(value) {
|
||||
case 4:
|
||||
return Uint32Field(name, uint32(value))
|
||||
case 8:
|
||||
return Uint64Field(name, uint64(value))
|
||||
default:
|
||||
panic("Unsupported uint size")
|
||||
}
|
||||
}
|
||||
|
||||
// UintArray adds an array of uint to the event.
|
||||
func UintArray(name string, values []uint) FieldOpt {
|
||||
inType := InTypeNull
|
||||
var writeItem func(*EventData, uint)
|
||||
switch unsafe.Sizeof(values[0]) {
|
||||
case 4:
|
||||
inType = InTypeUint32
|
||||
writeItem = func(ed *EventData, item uint) { ed.WriteUint32(uint32(item)) }
|
||||
case 8:
|
||||
inType = InTypeUint64
|
||||
writeItem = func(ed *EventData, item uint) { ed.WriteUint64(uint64(item)) }
|
||||
default:
|
||||
panic("Unsupported uint size")
|
||||
}
|
||||
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, inType, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
writeItem(ed, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Uint8Field adds a single uint8 field to the event.
|
||||
func Uint8Field(name string, value uint8) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeUint8, OutTypeDefault, 0)
|
||||
ed.WriteUint8(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Uint8Array adds an array of uint8 to the event.
|
||||
func Uint8Array(name string, values []uint8) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeUint8, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteUint8(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Uint16Field adds a single uint16 field to the event.
|
||||
func Uint16Field(name string, value uint16) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeUint16, OutTypeDefault, 0)
|
||||
ed.WriteUint16(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Uint16Array adds an array of uint16 to the event.
|
||||
func Uint16Array(name string, values []uint16) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeUint16, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteUint16(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Uint32Field adds a single uint32 field to the event.
|
||||
func Uint32Field(name string, value uint32) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeUint32, OutTypeDefault, 0)
|
||||
ed.WriteUint32(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Uint32Array adds an array of uint32 to the event.
|
||||
func Uint32Array(name string, values []uint32) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeUint32, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteUint32(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Uint64Field adds a single uint64 field to the event.
|
||||
func Uint64Field(name string, value uint64) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeUint64, OutTypeDefault, 0)
|
||||
ed.WriteUint64(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Uint64Array adds an array of uint64 to the event.
|
||||
func Uint64Array(name string, values []uint64) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeUint64, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteUint64(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UintptrField adds a single uintptr field to the event.
|
||||
func UintptrField(name string, value uintptr) FieldOpt {
|
||||
inType := InTypeNull
|
||||
var writeItem func(*EventData, uintptr)
|
||||
switch unsafe.Sizeof(value) {
|
||||
case 4:
|
||||
inType = InTypeHexInt32
|
||||
writeItem = func(ed *EventData, item uintptr) { ed.WriteUint32(uint32(item)) }
|
||||
case 8:
|
||||
inType = InTypeHexInt64
|
||||
writeItem = func(ed *EventData, item uintptr) { ed.WriteUint64(uint64(item)) }
|
||||
default:
|
||||
panic("Unsupported uintptr size")
|
||||
}
|
||||
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, inType, OutTypeDefault, 0)
|
||||
writeItem(ed, value)
|
||||
}
|
||||
}
|
||||
|
||||
// UintptrArray adds an array of uintptr to the event.
|
||||
func UintptrArray(name string, values []uintptr) FieldOpt {
|
||||
inType := InTypeNull
|
||||
var writeItem func(*EventData, uintptr)
|
||||
switch unsafe.Sizeof(values[0]) {
|
||||
case 4:
|
||||
inType = InTypeHexInt32
|
||||
writeItem = func(ed *EventData, item uintptr) { ed.WriteUint32(uint32(item)) }
|
||||
case 8:
|
||||
inType = InTypeHexInt64
|
||||
writeItem = func(ed *EventData, item uintptr) { ed.WriteUint64(uint64(item)) }
|
||||
default:
|
||||
panic("Unsupported uintptr size")
|
||||
}
|
||||
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, inType, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
writeItem(ed, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Float32Field adds a single float32 field to the event.
|
||||
func Float32Field(name string, value float32) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeFloat, OutTypeDefault, 0)
|
||||
ed.WriteUint32(math.Float32bits(value))
|
||||
}
|
||||
}
|
||||
|
||||
// Float32Array adds an array of float32 to the event.
|
||||
func Float32Array(name string, values []float32) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeFloat, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteUint32(math.Float32bits(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Float64Field adds a single float64 field to the event.
|
||||
func Float64Field(name string, value float64) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteField(name, InTypeDouble, OutTypeDefault, 0)
|
||||
ed.WriteUint64(math.Float64bits(value))
|
||||
}
|
||||
}
|
||||
|
||||
// Float64Array adds an array of float64 to the event.
|
||||
func Float64Array(name string, values []float64) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteArray(name, InTypeDouble, OutTypeDefault, 0)
|
||||
ed.WriteUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.WriteUint64(math.Float64bits(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Struct adds a nested struct to the event, the FieldOpts in the opts argument
|
||||
// are used to specify the fields of the struct.
|
||||
func Struct(name string, opts ...FieldOpt) FieldOpt {
|
||||
return func(em *EventMetadata, ed *EventData) {
|
||||
em.WriteStruct(name, uint8(len(opts)), 0)
|
||||
for _, opt := range opts {
|
||||
opt(em, ed)
|
||||
}
|
||||
}
|
||||
}
|
||||
279 vendor/github.com/Microsoft/go-winio/internal/etw/provider.go generated vendored Normal file
@@ -0,0 +1,279 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Provider represents an ETW event provider. It is identified by a provider
|
||||
// name and ID (GUID), which should always have a 1:1 mapping to each other
|
||||
// (e.g. don't use multiple provider names with the same ID, or vice versa).
|
||||
type Provider struct {
|
||||
ID *windows.GUID
|
||||
handle providerHandle
|
||||
metadata []byte
|
||||
callback EnableCallback
|
||||
index uint
|
||||
enabled bool
|
||||
level Level
|
||||
keywordAny uint64
|
||||
keywordAll uint64
|
||||
}
|
||||
|
||||
// String returns the `provider`.ID as a string
|
||||
func (provider *Provider) String() string {
|
||||
data1 := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(data1, provider.ID.Data1)
|
||||
data2 := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(data2, provider.ID.Data2)
|
||||
data3 := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(data3, provider.ID.Data3)
|
||||
return fmt.Sprintf(
|
||||
"%s-%s-%s-%s-%s",
|
||||
hex.EncodeToString(data1),
|
||||
hex.EncodeToString(data2),
|
||||
hex.EncodeToString(data3),
|
||||
hex.EncodeToString(provider.ID.Data4[:2]),
|
||||
hex.EncodeToString(provider.ID.Data4[2:]))
|
||||
}
|
||||
|
||||
type providerHandle windows.Handle
|
||||
|
||||
// ProviderState informs the provider EnableCallback what action is being
|
||||
// performed.
|
||||
type ProviderState uint32
|
||||
|
||||
const (
|
||||
// ProviderStateDisable indicates the provider is being disabled.
|
||||
ProviderStateDisable ProviderState = iota
|
||||
// ProviderStateEnable indicates the provider is being enabled.
|
||||
ProviderStateEnable
|
||||
// ProviderStateCaptureState indicates the provider is having its current
|
||||
// state snap-shotted.
|
||||
ProviderStateCaptureState
|
||||
)
|
||||
|
||||
type eventInfoClass uint32
|
||||
|
||||
const (
|
||||
eventInfoClassProviderBinaryTrackInfo eventInfoClass = iota
|
||||
eventInfoClassProviderSetReserved1
|
||||
eventInfoClassProviderSetTraits
|
||||
eventInfoClassProviderUseDescriptorType
|
||||
)
|
||||
|
||||
// EnableCallback is the form of the callback function that receives provider
|
||||
// enable/disable notifications from ETW.
|
||||
type EnableCallback func(*windows.GUID, ProviderState, Level, uint64, uint64, uintptr)
|
||||
|
||||
func providerCallback(sourceID *windows.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) {
|
||||
provider := providers.getProvider(uint(i))
|
||||
|
||||
switch state {
|
||||
case ProviderStateDisable:
|
||||
provider.enabled = false
|
||||
case ProviderStateEnable:
|
||||
provider.enabled = true
|
||||
provider.level = level
|
||||
provider.keywordAny = matchAnyKeyword
|
||||
provider.keywordAll = matchAllKeyword
|
||||
}
|
||||
|
||||
if provider.callback != nil {
|
||||
provider.callback(sourceID, state, level, matchAnyKeyword, matchAllKeyword, filterData)
|
||||
}
|
||||
}
|
||||
|
||||
// providerCallbackAdapter acts as the first-level callback from the C/ETW side
|
||||
// for provider notifications. Because Go has trouble with callback arguments of
|
||||
// different size, it has only pointer-sized arguments, which are then cast to
|
||||
// the appropriate types when calling providerCallback.
|
||||
func providerCallbackAdapter(sourceID *windows.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr {
|
||||
providerCallback(sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i)
|
||||
return 0
|
||||
}
|
||||
|
||||
// providerIDFromName generates a provider ID based on the provider name. It
|
||||
// uses the same algorithm as used by .NET's EventSource class, which is based
|
||||
// on RFC 4122. More information on the algorithm can be found here:
|
||||
// https://blogs.msdn.microsoft.com/dcook/2015/09/08/etw-provider-names-and-guids/
|
||||
// The algorithm is roughly:
|
||||
// Hash = Sha1(namespace + arg.ToUpper().ToUtf16be())
|
||||
// Guid = Hash[0..15], with Hash[7] tweaked according to RFC 4122
|
||||
func providerIDFromName(name string) *windows.GUID {
|
||||
buffer := sha1.New()
|
||||
|
||||
namespace := []byte{0x48, 0x2C, 0x2D, 0xB2, 0xC3, 0x90, 0x47, 0xC8, 0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}
|
||||
buffer.Write(namespace)
|
||||
|
||||
binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name))))
|
||||
|
||||
sum := buffer.Sum(nil)
|
||||
sum[7] = (sum[7] & 0xf) | 0x50
|
||||
|
||||
return &windows.GUID{
|
||||
Data1: binary.LittleEndian.Uint32(sum[0:4]),
|
||||
Data2: binary.LittleEndian.Uint16(sum[4:6]),
|
||||
Data3: binary.LittleEndian.Uint16(sum[6:8]),
|
||||
Data4: [8]byte{sum[8], sum[9], sum[10], sum[11], sum[12], sum[13], sum[14], sum[15]},
|
||||
}
|
||||
}
|
||||
|
||||
// NewProvider creates and registers a new ETW provider. The provider ID is
|
||||
// generated based on the provider name.
|
||||
func NewProvider(name string, callback EnableCallback) (provider *Provider, err error) {
|
||||
return NewProviderWithID(name, providerIDFromName(name), callback)
|
||||
}
|
||||
|
||||
// NewProviderWithID creates and registers a new ETW provider, allowing the
|
||||
// provider ID to be manually specified. This is most useful when there is an
|
||||
// existing provider ID that must be used to conform to existing diagnostic
|
||||
// infrastructure.
|
||||
func NewProviderWithID(name string, id *windows.GUID, callback EnableCallback) (provider *Provider, err error) {
|
||||
providerCallbackOnce.Do(func() {
|
||||
globalProviderCallback = windows.NewCallback(providerCallbackAdapter)
|
||||
})
|
||||
|
||||
provider = providers.newProvider()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
providers.removeProvider(provider)
|
||||
}
|
||||
}()
|
||||
provider.ID = id
|
||||
provider.callback = callback
|
||||
|
||||
if err := eventRegister(provider.ID, globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metadata := &bytes.Buffer{}
|
||||
binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later)
|
||||
metadata.WriteString(name)
|
||||
metadata.WriteByte(0) // Null terminator for name
|
||||
binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer
|
||||
provider.metadata = metadata.Bytes()
|
||||
|
||||
if err := eventSetInformation(
|
||||
provider.handle,
|
||||
eventInfoClassProviderSetTraits,
|
||||
uintptr(unsafe.Pointer(&provider.metadata[0])),
|
||||
uint32(len(provider.metadata))); err != nil {
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
// Close unregisters the provider.
|
||||
func (provider *Provider) Close() error {
|
||||
providers.removeProvider(provider)
|
||||
return eventUnregister(provider.handle)
|
||||
}
|
||||
|
||||
// IsEnabled calls IsEnabledForLevelAndKeywords with LevelAlways and all
|
||||
// keywords set.
|
||||
func (provider *Provider) IsEnabled() bool {
|
||||
return provider.IsEnabledForLevelAndKeywords(LevelAlways, ^uint64(0))
|
||||
}
|
||||
|
||||
// IsEnabledForLevel calls IsEnabledForLevelAndKeywords with the specified level
|
||||
// and all keywords set.
|
||||
func (provider *Provider) IsEnabledForLevel(level Level) bool {
|
||||
return provider.IsEnabledForLevelAndKeywords(level, ^uint64(0))
|
||||
}
|
||||
|
||||
// IsEnabledForLevelAndKeywords allows event producer code to check if there are
|
||||
// any event sessions that are interested in an event, based on the event level
|
||||
// and keywords. Although this check happens automatically in the ETW
|
||||
// infrastructure, it can be useful to check if an event will actually be
|
||||
// consumed before doing expensive work to build the event data.
|
||||
func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uint64) bool {
|
||||
if !provider.enabled {
|
||||
return false
|
||||
}
|
||||
|
||||
// ETW automatically sets the level to 255 if it is specified as 0, so we
|
||||
// don't need to worry about the level=0 (all events) case.
|
||||
if level > provider.level {
|
||||
return false
|
||||
}
|
||||
|
||||
if keywords != 0 && (keywords&provider.keywordAny == 0 || keywords&provider.keywordAll != provider.keywordAll) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// WriteEvent writes a single ETW event from the provider. The event is
|
||||
// constructed based on the EventOpt and FieldOpt values that are passed as
|
||||
// opts.
|
||||
func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpts []FieldOpt) error {
|
||||
options := eventOptions{descriptor: NewEventDescriptor()}
|
||||
em := &EventMetadata{}
|
||||
ed := &EventData{}
|
||||
|
||||
// We need to evaluate the EventOpts first since they might change tags, and
|
||||
// we write out the tags before evaluating FieldOpts.
|
||||
for _, opt := range eventOpts {
|
||||
opt(&options)
|
||||
}
|
||||
|
||||
if !provider.IsEnabledForLevelAndKeywords(options.descriptor.Level, options.descriptor.Keyword) {
|
||||
return nil
|
||||
}
|
||||
|
||||
em.WriteEventHeader(name, options.tags)
|
||||
|
||||
for _, opt := range fieldOpts {
|
||||
opt(em, ed)
|
||||
}
|
||||
|
||||
// Don't pass a data blob if there is no event data. There will always be
|
||||
// event metadata (e.g. for the name) so we don't need to do this check for
|
||||
// the metadata.
|
||||
dataBlobs := [][]byte{}
|
||||
if len(ed.Bytes()) > 0 {
|
||||
dataBlobs = [][]byte{ed.Bytes()}
|
||||
}
|
||||
|
||||
return provider.WriteEventRaw(options.descriptor, nil, nil, [][]byte{em.Bytes()}, dataBlobs)
|
||||
}

// WriteEventRaw writes a single ETW event from the provider. This function is
// less abstracted than WriteEvent, and presents a fairly direct interface to
// the event writing functionality. It expects a series of event metadata and
// event data blobs to be passed in, which must conform to the TraceLogging
// schema. The functions on EventMetadata and EventData can help with creating
// these blobs. The blobs of each type are effectively concatenated together by
// the ETW infrastructure.
func (provider *Provider) WriteEventRaw(
	descriptor *EventDescriptor,
	activityID *windows.GUID,
	relatedActivityID *windows.GUID,
	metadataBlobs [][]byte,
	dataBlobs [][]byte) error {

	dataDescriptorCount := uint32(1 + len(metadataBlobs) + len(dataBlobs))
	dataDescriptors := make([]eventDataDescriptor, 0, dataDescriptorCount)

	dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeProviderMetadata, provider.metadata))
	for _, blob := range metadataBlobs {
		dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeEventMetadata, blob))
	}
	for _, blob := range dataBlobs {
		dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob))
	}

	return eventWriteTransfer(provider.handle, descriptor, activityID, relatedActivityID, dataDescriptorCount, &dataDescriptors[0])
}
52 vendor/github.com/Microsoft/go-winio/internal/etw/providerglobal.go generated vendored Normal file
@@ -0,0 +1,52 @@
package etw

import (
	"sync"
)

// Because the provider callback function needs to be able to access the
// provider data when it is invoked by ETW, we need to keep provider data stored
// in a global map based on an index. The index is passed as the callback
// context to ETW.
type providerMap struct {
	m    map[uint]*Provider
	i    uint
	lock sync.Mutex
	once sync.Once
}

var providers = providerMap{
	m: make(map[uint]*Provider),
}

func (p *providerMap) newProvider() *Provider {
	p.lock.Lock()
	defer p.lock.Unlock()

	i := p.i
	p.i++

	provider := &Provider{
		index: i,
	}

	p.m[i] = provider
	return provider
}

func (p *providerMap) removeProvider(provider *Provider) {
	p.lock.Lock()
	defer p.lock.Unlock()

	delete(p.m, provider.index)
}

func (p *providerMap) getProvider(index uint) *Provider {
	p.lock.Lock()
	defer p.lock.Unlock()

	return p.m[index]
}

var providerCallbackOnce sync.Once
var globalProviderCallback uintptr
16 vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_32.go generated vendored Normal file
@@ -0,0 +1,16 @@
// +build 386 arm

package etw

import (
	"unsafe"
)

// ptr64 defines a struct containing a pointer. The struct is guaranteed to
// be 64 bits, regardless of the actual size of a pointer on the platform. This
// is intended for use with certain Windows APIs that expect a pointer as a
// ULONGLONG.
type ptr64 struct {
	ptr unsafe.Pointer
	_   uint32
}
15 vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_64.go generated vendored Normal file
@@ -0,0 +1,15 @@
// +build amd64 arm64

package etw

import (
	"unsafe"
)

// ptr64 defines a struct containing a pointer. The struct is guaranteed to
// be 64 bits, regardless of the actual size of a pointer on the platform. This
// is intended for use with certain Windows APIs that expect a pointer as a
// ULONGLONG.
type ptr64 struct {
	ptr unsafe.Pointer
}
91 vendor/github.com/Microsoft/go-winio/internal/etw/sample/sample.go generated vendored Normal file
@@ -0,0 +1,91 @@
|
||||
// Shows a sample usage of the ETW logging package.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/Microsoft/go-winio/internal/etw"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func callback(sourceID *windows.GUID, state etw.ProviderState, level etw.Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr) {
|
||||
fmt.Printf("Callback: isEnabled=%d, level=%d, matchAnyKeyword=%d\n", state, level, matchAnyKeyword)
|
||||
}
|
||||
|
||||
func main() {
|
||||
provider, err := etw.NewProvider("TestProvider", callback)
|
||||
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err := provider.Close(); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
fmt.Printf("Provider ID: %s\n", provider)
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
fmt.Println("Press enter to log events")
|
||||
reader.ReadString('\n')
|
||||
|
||||
// Write using high-level API.
|
||||
if err := provider.WriteEvent(
|
||||
"TestEvent",
|
||||
etw.WithEventOpts(
|
||||
etw.WithLevel(etw.LevelInfo),
|
||||
etw.WithKeyword(0x140),
|
||||
),
|
||||
etw.WithFields(
|
||||
etw.StringField("TestField", "Foo"),
|
||||
etw.StringField("TestField2", "Bar"),
|
||||
etw.Struct("TestStruct",
|
||||
etw.StringField("Field1", "Value1"),
|
||||
etw.StringField("Field2", "Value2")),
|
||||
etw.StringArray("TestArray", []string{
|
||||
"Item1",
|
||||
"Item2",
|
||||
"Item3",
|
||||
"Item4",
|
||||
"Item5",
|
||||
})),
|
||||
); err != nil {
|
||||
logrus.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write using low-level API.
|
||||
descriptor := etw.NewEventDescriptor()
|
||||
descriptor.Level = etw.LevelInfo
|
||||
descriptor.Keyword = 0x140
|
||||
em := &etw.EventMetadata{}
|
||||
ed := &etw.EventData{}
|
||||
em.WriteEventHeader("TestEvent", 0)
|
||||
em.WriteField("TestField", etw.InTypeANSIString, etw.OutTypeUTF8, 0)
|
||||
ed.WriteString("Foo")
|
||||
em.WriteField("TestField2", etw.InTypeANSIString, etw.OutTypeUTF8, 0)
|
||||
ed.WriteString("Bar")
|
||||
em.WriteStruct("TestStruct", 2, 0)
|
||||
em.WriteField("Field1", etw.InTypeANSIString, etw.OutTypeUTF8, 0)
|
||||
ed.WriteString("Value1")
|
||||
em.WriteField("Field2", etw.InTypeANSIString, etw.OutTypeUTF8, 0)
|
||||
ed.WriteString("Value2")
|
||||
em.WriteArray("TestArray", etw.InTypeANSIString, etw.OutTypeDefault, 0)
|
||||
ed.WriteUint16(5)
|
||||
ed.WriteString("Item1")
|
||||
ed.WriteString("Item2")
|
||||
ed.WriteString("Item3")
|
||||
ed.WriteString("Item4")
|
||||
ed.WriteString("Item5")
|
||||
if err := provider.WriteEventRaw(descriptor, nil, nil, [][]byte{em.Bytes()}, [][]byte{ed.Bytes()}); err != nil {
|
||||
logrus.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
78 vendor/github.com/Microsoft/go-winio/internal/etw/zsyscall_windows.go generated vendored Normal file
@@ -0,0 +1,78 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package etw
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values seen on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
|
||||
procEventRegister = modadvapi32.NewProc("EventRegister")
|
||||
procEventUnregister = modadvapi32.NewProc("EventUnregister")
|
||||
procEventWriteTransfer = modadvapi32.NewProc("EventWriteTransfer")
|
||||
procEventSetInformation = modadvapi32.NewProc("EventSetInformation")
|
||||
)
|
||||
|
||||
func eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procEventRegister.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(callback), uintptr(callbackContext), uintptr(unsafe.Pointer(providerHandle)), 0, 0)
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func eventUnregister(providerHandle providerHandle) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 1, uintptr(providerHandle), 0, 0)
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func eventWriteTransfer(providerHandle providerHandle, descriptor *EventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procEventWriteTransfer.Addr(), 6, uintptr(providerHandle), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 4, uintptr(providerHandle), uintptr(class), uintptr(information), uintptr(length), 0, 0)
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
421 vendor/github.com/Microsoft/go-winio/pipe.go generated vendored Normal file
@@ -0,0 +1,421 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
||||
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
||||
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
|
||||
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||
|
||||
const (
|
||||
cERROR_PIPE_BUSY = syscall.Errno(231)
|
||||
cERROR_NO_DATA = syscall.Errno(232)
|
||||
cERROR_PIPE_CONNECTED = syscall.Errno(535)
|
||||
cERROR_SEM_TIMEOUT = syscall.Errno(121)
|
||||
|
||||
cPIPE_ACCESS_DUPLEX = 0x3
|
||||
cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
|
||||
cSECURITY_SQOS_PRESENT = 0x100000
|
||||
cSECURITY_ANONYMOUS = 0
|
||||
|
||||
cPIPE_REJECT_REMOTE_CLIENTS = 0x8
|
||||
|
||||
cPIPE_UNLIMITED_INSTANCES = 255
|
||||
|
||||
cNMPWAIT_USE_DEFAULT_WAIT = 0
|
||||
cNMPWAIT_NOWAIT = 1
|
||||
|
||||
cPIPE_TYPE_MESSAGE = 4
|
||||
|
||||
cPIPE_READMODE_MESSAGE = 2
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
|
||||
// This error should match net.errClosing since docker takes a dependency on its text.
|
||||
ErrPipeListenerClosed = errors.New("use of closed network connection")
|
||||
|
||||
errPipeWriteClosed = errors.New("pipe has been closed for write")
|
||||
)
|
||||
|
||||
type win32Pipe struct {
|
||||
*win32File
|
||||
path string
|
||||
}
|
||||
|
||||
type win32MessageBytePipe struct {
|
||||
win32Pipe
|
||||
writeClosed bool
|
||||
readEOF bool
|
||||
}
|
||||
|
||||
type pipeAddress string
|
||||
|
||||
func (f *win32Pipe) LocalAddr() net.Addr {
|
||||
return pipeAddress(f.path)
|
||||
}
|
||||
|
||||
func (f *win32Pipe) RemoteAddr() net.Addr {
|
||||
return pipeAddress(f.path)
|
||||
}
|
||||
|
||||
func (f *win32Pipe) SetDeadline(t time.Time) error {
|
||||
f.SetReadDeadline(t)
|
||||
f.SetWriteDeadline(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloseWrite closes the write side of a message pipe in byte mode.
|
||||
func (f *win32MessageBytePipe) CloseWrite() error {
|
||||
if f.writeClosed {
|
||||
return errPipeWriteClosed
|
||||
}
|
||||
err := f.win32File.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = f.win32File.Write(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.writeClosed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
|
||||
// they are used to implement CloseWrite().
|
||||
func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
|
||||
if f.writeClosed {
|
||||
return 0, errPipeWriteClosed
|
||||
}
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
return f.win32File.Write(b)
|
||||
}
|
||||
|
||||
// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
|
||||
// mode pipe will return io.EOF, as will all subsequent reads.
|
||||
func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
|
||||
if f.readEOF {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n, err := f.win32File.Read(b)
|
||||
if err == io.EOF {
|
||||
// If this was the result of a zero-byte read, then
|
||||
// it is possible that the read was due to a zero-size
|
||||
// message. Since we are simulating CloseWrite with a
|
||||
// zero-byte message, ensure that all future Read() calls
|
||||
// also return EOF.
|
||||
f.readEOF = true
|
||||
} else if err == syscall.ERROR_MORE_DATA {
|
||||
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
|
||||
// and the message still has more bytes. Treat this as a success, since
|
||||
// this package presents all named pipes as byte streams.
|
||||
err = nil
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (s pipeAddress) Network() string {
|
||||
return "pipe"
|
||||
}
|
||||
|
||||
func (s pipeAddress) String() string {
|
||||
return string(s)
|
||||
}
|
||||
|
||||
// DialPipe connects to a named pipe by path, timing out if the connection
|
||||
// takes longer than the specified duration. If timeout is nil, then we use
|
||||
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
|
||||
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
||||
var absTimeout time.Time
|
||||
if timeout != nil {
|
||||
absTimeout = time.Now().Add(*timeout)
|
||||
} else {
|
||||
absTimeout = time.Now().Add(time.Second * 2)
|
||||
}
|
||||
var err error
|
||||
var h syscall.Handle
|
||||
for {
|
||||
h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
if err != cERROR_PIPE_BUSY {
|
||||
break
|
||||
}
|
||||
if time.Now().After(absTimeout) {
|
||||
return nil, ErrTimeout
|
||||
}
|
||||
|
||||
// Wait 10 msec and try again. This is a rather simplistic
|
||||
// view, as we always try each 10 milliseconds.
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
|
||||
var flags uint32
|
||||
err = getNamedPipeInfo(h, &flags, nil, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err := makeWin32File(h)
|
||||
if err != nil {
|
||||
syscall.Close(h)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the pipe is in message mode, return a message byte pipe, which
|
||||
// supports CloseWrite().
|
||||
if flags&cPIPE_TYPE_MESSAGE != 0 {
|
||||
return &win32MessageBytePipe{
|
||||
win32Pipe: win32Pipe{win32File: f, path: path},
|
||||
}, nil
|
||||
}
|
||||
return &win32Pipe{win32File: f, path: path}, nil
|
||||
}
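A minimal client sketch against the exported winio API; the pipe name is illustrative, and it assumes some server is already listening on that pipe:

    package main

    import (
        "fmt"
        "log"
        "time"

        winio "github.com/Microsoft/go-winio"
    )

    func main() {
        timeout := 2 * time.Second
        conn, err := winio.DialPipe(`\\.\pipe\mypipe`, &timeout) // returns a net.Conn
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        fmt.Fprintln(conn, "hello") // write like any other net.Conn
    }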
|
||||
|
||||
type acceptResponse struct {
|
||||
f *win32File
|
||||
err error
|
||||
}
|
||||
|
||||
type win32PipeListener struct {
|
||||
firstHandle syscall.Handle
|
||||
path string
|
||||
securityDescriptor []byte
|
||||
config PipeConfig
|
||||
acceptCh chan (chan acceptResponse)
|
||||
closeCh chan int
|
||||
doneCh chan int
|
||||
}
|
||||
|
||||
func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
|
||||
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
|
||||
if first {
|
||||
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
|
||||
}
|
||||
|
||||
var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
|
||||
if c.MessageMode {
|
||||
mode |= cPIPE_TYPE_MESSAGE
|
||||
}
|
||||
|
||||
sa := &syscall.SecurityAttributes{}
|
||||
sa.Length = uint32(unsafe.Sizeof(*sa))
|
||||
if securityDescriptor != nil {
|
||||
len := uint32(len(securityDescriptor))
|
||||
sa.SecurityDescriptor = localAlloc(0, len)
|
||||
defer localFree(sa.SecurityDescriptor)
|
||||
copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
|
||||
}
|
||||
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
|
||||
h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := makeWin32File(h)
|
||||
if err != nil {
|
||||
syscall.Close(h)
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
|
||||
p, err := l.makeServerPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wait for the client to connect.
|
||||
ch := make(chan error)
|
||||
go func(p *win32File) {
|
||||
ch <- connectPipe(p)
|
||||
}(p)
|
||||
|
||||
select {
|
||||
case err = <-ch:
|
||||
if err != nil {
|
||||
p.Close()
|
||||
p = nil
|
||||
}
|
||||
case <-l.closeCh:
|
||||
// Abort the connect request by closing the handle.
|
||||
p.Close()
|
||||
p = nil
|
||||
err = <-ch
|
||||
if err == nil || err == ErrFileClosed {
|
||||
err = ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
return p, err
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) listenerRoutine() {
|
||||
closed := false
|
||||
for !closed {
|
||||
select {
|
||||
case <-l.closeCh:
|
||||
closed = true
|
||||
case responseCh := <-l.acceptCh:
|
||||
var (
|
||||
p *win32File
|
||||
err error
|
||||
)
|
||||
for {
|
||||
p, err = l.makeConnectedServerPipe()
|
||||
// If the connection was immediately closed by the client, try
|
||||
// again.
|
||||
if err != cERROR_NO_DATA {
|
||||
break
|
||||
}
|
||||
}
|
||||
responseCh <- acceptResponse{p, err}
|
||||
closed = err == ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
syscall.Close(l.firstHandle)
|
||||
l.firstHandle = 0
|
||||
// Notify Close() and Accept() callers that the handle has been closed.
|
||||
close(l.doneCh)
|
||||
}
|
||||
|
||||
// PipeConfig contains configuration for the pipe listener.
|
||||
type PipeConfig struct {
|
||||
// SecurityDescriptor contains a Windows security descriptor in SDDL format.
|
||||
SecurityDescriptor string
|
||||
|
||||
// MessageMode determines whether the pipe is in byte or message mode. In either
|
||||
// case the pipe is read in byte mode by default. The only practical difference in
|
||||
// this implementation is that CloseWrite() is only supported for message mode pipes;
|
||||
// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
|
||||
// transferred to the reader (and returned as io.EOF in this implementation)
|
||||
// when the pipe is in message mode.
|
||||
MessageMode bool
|
||||
|
||||
// InputBufferSize specifies the size of the input buffer, in bytes.
|
||||
InputBufferSize int32
|
||||
|
||||
// OutputBufferSize specifies the size of the output buffer, in bytes.
|
||||
OutputBufferSize int32
|
||||
}
|
||||
|
||||
// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
|
||||
// The pipe must not already exist.
|
||||
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
|
||||
var (
|
||||
sd []byte
|
||||
err error
|
||||
)
|
||||
if c == nil {
|
||||
c = &PipeConfig{}
|
||||
}
|
||||
if c.SecurityDescriptor != "" {
|
||||
sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
h, err := makeServerPipeHandle(path, sd, c, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Create a client handle and connect it. This results in the pipe
|
||||
// instance always existing, so that clients see ERROR_PIPE_BUSY
|
||||
// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
|
||||
// up so that no other instances can be used. This would have been
|
||||
// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
|
||||
// instead of CreateNamedPipe. (Apparently created named pipes are
|
||||
// considered to be in listening state regardless of whether any
|
||||
// active calls to ConnectNamedPipe are outstanding.)
|
||||
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
if err != nil {
|
||||
syscall.Close(h)
|
||||
return nil, err
|
||||
}
|
||||
// Close the client handle. The server side of the instance will
|
||||
// still be busy, leading to ERROR_PIPE_BUSY instead of
|
||||
// ERROR_NOT_FOUND, as long as we don't close the server handle,
|
||||
// or disconnect the client with DisconnectNamedPipe.
|
||||
syscall.Close(h2)
|
||||
l := &win32PipeListener{
|
||||
firstHandle: h,
|
||||
path: path,
|
||||
securityDescriptor: sd,
|
||||
config: *c,
|
||||
acceptCh: make(chan (chan acceptResponse)),
|
||||
closeCh: make(chan int),
|
||||
doneCh: make(chan int),
|
||||
}
|
||||
go l.listenerRoutine()
|
||||
return l, nil
|
||||
}
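The corresponding server side, again as a sketch with an illustrative pipe name and a nil PipeConfig (byte mode, default buffer sizes):

    package main

    import (
        "bufio"
        "log"

        winio "github.com/Microsoft/go-winio"
    )

    func main() {
        l, err := winio.ListenPipe(`\\.\pipe\mypipe`, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer l.Close()

        conn, err := l.Accept() // blocks until a client dials the pipe
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        line, _ := bufio.NewReader(conn).ReadString('\n')
        log.Printf("received: %q", line)
    }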
|
||||
|
||||
func connectPipe(p *win32File) error {
|
||||
c, err := p.prepareIo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer p.wg.Done()
|
||||
|
||||
err = connectNamedPipe(p.handle, &c.o)
|
||||
_, err = p.asyncIo(c, nil, 0, err)
|
||||
if err != nil && err != cERROR_PIPE_CONNECTED {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) Accept() (net.Conn, error) {
|
||||
ch := make(chan acceptResponse)
|
||||
select {
|
||||
case l.acceptCh <- ch:
|
||||
response := <-ch
|
||||
err := response.err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if l.config.MessageMode {
|
||||
return &win32MessageBytePipe{
|
||||
win32Pipe: win32Pipe{win32File: response.f, path: l.path},
|
||||
}, nil
|
||||
}
|
||||
return &win32Pipe{win32File: response.f, path: l.path}, nil
|
||||
case <-l.doneCh:
|
||||
return nil, ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) Close() error {
|
||||
select {
|
||||
case l.closeCh <- 1:
|
||||
<-l.doneCh
|
||||
case <-l.doneCh:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) Addr() net.Addr {
|
||||
return pipeAddress(l.path)
|
||||
}
|
||||
516 vendor/github.com/Microsoft/go-winio/pipe_test.go generated vendored Normal file
@@ -0,0 +1,516 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var testPipeName = `\\.\pipe\winiotestpipe`
|
||||
|
||||
var aLongTimeAgo = time.Unix(1, 0)
|
||||
|
||||
func TestDialUnknownFailsImmediately(t *testing.T) {
|
||||
_, err := DialPipe(testPipeName, nil)
|
||||
if err.(*os.PathError).Err != syscall.ENOENT {
|
||||
t.Fatalf("expected ENOENT got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDialListenerTimesOut(t *testing.T) {
|
||||
l, err := ListenPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
var d = time.Duration(10 * time.Millisecond)
|
||||
_, err = DialPipe(testPipeName, &d)
|
||||
if err != ErrTimeout {
|
||||
t.Fatalf("expected ErrTimeout, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDialAccessDeniedWithRestrictedSD(t *testing.T) {
|
||||
c := PipeConfig{
|
||||
SecurityDescriptor: "D:P(A;;0x1200FF;;;WD)",
|
||||
}
|
||||
l, err := ListenPipe(testPipeName, &c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
_, err = DialPipe(testPipeName, nil)
|
||||
if err.(*os.PathError).Err != syscall.ERROR_ACCESS_DENIED {
|
||||
t.Fatalf("expected ERROR_ACCESS_DENIED, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func getConnection(cfg *PipeConfig) (client net.Conn, server net.Conn, err error) {
|
||||
l, err := ListenPipe(testPipeName, cfg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
type response struct {
|
||||
c net.Conn
|
||||
err error
|
||||
}
|
||||
ch := make(chan response)
|
||||
go func() {
|
||||
c, err := l.Accept()
|
||||
ch <- response{c, err}
|
||||
}()
|
||||
|
||||
c, err := DialPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r := <-ch
|
||||
if err = r.err; err != nil {
|
||||
c.Close()
|
||||
return
|
||||
}
|
||||
|
||||
client = c
|
||||
server = r.c
|
||||
return
|
||||
}
|
||||
|
||||
func TestReadTimeout(t *testing.T) {
|
||||
c, s, err := getConnection(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Close()
|
||||
defer s.Close()
|
||||
|
||||
c.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
|
||||
|
||||
buf := make([]byte, 10)
|
||||
_, err = c.Read(buf)
|
||||
if err != ErrTimeout {
|
||||
t.Fatalf("expected ErrTimeout, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func server(l net.Listener, ch chan int) {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c))
|
||||
s, err := rw.ReadString('\n')
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = rw.WriteString("got " + s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = rw.Flush()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
c.Close()
|
||||
ch <- 1
|
||||
}
|
||||
|
||||
func TestFullListenDialReadWrite(t *testing.T) {
|
||||
l, err := ListenPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
ch := make(chan int)
|
||||
go server(l, ch)
|
||||
|
||||
c, err := DialPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c))
|
||||
_, err = rw.WriteString("hello world\n")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = rw.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s, err := rw.ReadString('\n')
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ms := "got hello world\n"
|
||||
if s != ms {
|
||||
t.Errorf("expected '%s', got '%s'", ms, s)
|
||||
}
|
||||
|
||||
<-ch
|
||||
}
|
||||
|
||||
func TestCloseAbortsListen(t *testing.T) {
|
||||
l, err := ListenPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ch := make(chan error)
|
||||
go func() {
|
||||
_, err := l.Accept()
|
||||
ch <- err
|
||||
}()
|
||||
|
||||
time.Sleep(30 * time.Millisecond)
|
||||
l.Close()
|
||||
|
||||
err = <-ch
|
||||
if err != ErrPipeListenerClosed {
|
||||
t.Fatalf("expected ErrPipeListenerClosed, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func ensureEOFOnClose(t *testing.T, r io.Reader, w io.Closer) {
|
||||
b := make([]byte, 10)
|
||||
w.Close()
|
||||
n, err := r.Read(b)
|
||||
if n > 0 {
|
||||
t.Errorf("unexpected byte count %d", n)
|
||||
}
|
||||
if err != io.EOF {
|
||||
t.Errorf("expected EOF: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloseClientEOFServer(t *testing.T) {
|
||||
c, s, err := getConnection(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Close()
|
||||
defer s.Close()
|
||||
ensureEOFOnClose(t, c, s)
|
||||
}
|
||||
|
||||
func TestCloseServerEOFClient(t *testing.T) {
|
||||
c, s, err := getConnection(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Close()
|
||||
defer s.Close()
|
||||
ensureEOFOnClose(t, s, c)
|
||||
}
|
||||
|
||||
func TestCloseWriteEOF(t *testing.T) {
|
||||
cfg := &PipeConfig{
|
||||
MessageMode: true,
|
||||
}
|
||||
c, s, err := getConnection(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Close()
|
||||
defer s.Close()
|
||||
|
||||
type closeWriter interface {
|
||||
CloseWrite() error
|
||||
}
|
||||
|
||||
err = c.(closeWriter).CloseWrite()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := make([]byte, 10)
|
||||
_, err = s.Read(b)
|
||||
if err != io.EOF {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAcceptAfterCloseFails(t *testing.T) {
|
||||
l, err := ListenPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
l.Close()
|
||||
_, err = l.Accept()
|
||||
if err != ErrPipeListenerClosed {
|
||||
t.Fatalf("expected ErrPipeListenerClosed, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDialTimesOutByDefault(t *testing.T) {
|
||||
l, err := ListenPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
_, err = DialPipe(testPipeName, nil)
|
||||
if err != ErrTimeout {
|
||||
t.Fatalf("expected ErrTimeout, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimeoutPendingRead(t *testing.T) {
|
||||
l, err := ListenPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
serverDone := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
s, err := l.Accept()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
s.Close()
|
||||
close(serverDone)
|
||||
}()
|
||||
|
||||
client, err := DialPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
clientErr := make(chan error)
|
||||
go func() {
|
||||
buf := make([]byte, 10)
|
||||
_, err = client.Read(buf)
|
||||
clientErr <- err
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond) // make *sure* the pipe is reading before we set the deadline
|
||||
client.SetReadDeadline(aLongTimeAgo)
|
||||
|
||||
select {
|
||||
case err = <-clientErr:
|
||||
if err != ErrTimeout {
|
||||
t.Fatalf("expected ErrTimeout, got %v", err)
|
||||
}
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatalf("timed out while waiting for read to cancel")
|
||||
<-clientErr
|
||||
}
|
||||
<-serverDone
|
||||
}
|
||||
|
||||
func TestTimeoutPendingWrite(t *testing.T) {
|
||||
l, err := ListenPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
serverDone := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
s, err := l.Accept()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
s.Close()
|
||||
close(serverDone)
|
||||
}()
|
||||
|
||||
client, err := DialPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
clientErr := make(chan error)
|
||||
go func() {
|
||||
_, err = client.Write([]byte("this should timeout"))
|
||||
clientErr <- err
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond) // make *sure* the pipe is writing before we set the deadline
|
||||
client.SetWriteDeadline(aLongTimeAgo)
|
||||
|
||||
select {
|
||||
case err = <-clientErr:
|
||||
if err != ErrTimeout {
|
||||
t.Fatalf("expected ErrTimeout, got %v", err)
|
||||
}
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatalf("timed out while waiting for write to cancel")
|
||||
<-clientErr
|
||||
}
|
||||
<-serverDone
|
||||
}
|
||||
|
||||
type CloseWriter interface {
|
||||
CloseWrite() error
|
||||
}
|
||||
|
||||
func TestEchoWithMessaging(t *testing.T) {
|
||||
c := PipeConfig{
|
||||
MessageMode: true, // Use message mode so that CloseWrite() is supported
|
||||
InputBufferSize: 65536, // Use 64KB buffers to improve performance
|
||||
OutputBufferSize: 65536,
|
||||
}
|
||||
l, err := ListenPipe(testPipeName, &c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
listenerDone := make(chan bool)
|
||||
clientDone := make(chan bool)
|
||||
go func() {
|
||||
// server echo
|
||||
conn, e := l.Accept()
|
||||
if e != nil {
|
||||
t.Fatal(e)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
time.Sleep(500 * time.Millisecond) // make *sure* we don't begin to read before eof signal is sent
|
||||
io.Copy(conn, conn)
|
||||
conn.(CloseWriter).CloseWrite()
|
||||
close(listenerDone)
|
||||
}()
|
||||
timeout := 1 * time.Second
|
||||
client, err := DialPipe(testPipeName, &timeout)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
go func() {
|
||||
// client read back
|
||||
bytes := make([]byte, 2)
|
||||
n, e := client.Read(bytes)
|
||||
if e != nil {
|
||||
t.Fatal(e)
|
||||
}
|
||||
if n != 2 {
|
||||
t.Fatalf("expected 2 bytes, got %v", n)
|
||||
}
|
||||
close(clientDone)
|
||||
}()
|
||||
|
||||
payload := make([]byte, 2)
|
||||
payload[0] = 0
|
||||
payload[1] = 1
|
||||
|
||||
n, err := client.Write(payload)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 2 {
|
||||
t.Fatalf("expected 2 bytes, got %v", n)
|
||||
}
|
||||
client.(CloseWriter).CloseWrite()
|
||||
<-listenerDone
|
||||
<-clientDone
|
||||
}
|
||||
|
||||
func TestConnectRace(t *testing.T) {
|
||||
l, err := ListenPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
go func() {
|
||||
for {
|
||||
s, err := l.Accept()
|
||||
if err == ErrPipeListenerClosed {
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
c, err := DialPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func TestMessageReadMode(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
|
||||
l, err := ListenPipe(testPipeName, &PipeConfig{MessageMode: true})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
msg := ([]byte)("hello world")
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
s, err := l.Accept()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = s.Write(msg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s.Close()
|
||||
}()
|
||||
|
||||
c, err := DialPipe(testPipeName, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
setNamedPipeHandleState := syscall.NewLazyDLL("kernel32.dll").NewProc("SetNamedPipeHandleState")
|
||||
|
||||
p := c.(*win32MessageBytePipe)
|
||||
mode := uint32(cPIPE_READMODE_MESSAGE)
|
||||
if s, _, err := setNamedPipeHandleState.Call(uintptr(p.handle), uintptr(unsafe.Pointer(&mode)), 0, 0); s == 0 {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ch := make([]byte, 1)
|
||||
var vmsg []byte
|
||||
for {
|
||||
n, err := c.Read(ch)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 1 {
|
||||
t.Fatal("expected 1: ", n)
|
||||
}
|
||||
vmsg = append(vmsg, ch[0])
|
||||
}
|
||||
if !bytes.Equal(msg, vmsg) {
|
||||
t.Fatalf("expected %s: %s", msg, vmsg)
|
||||
}
|
||||
}
|
||||
192 vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go generated vendored Normal file
@@ -0,0 +1,192 @@
|
||||
package etwlogrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/Microsoft/go-winio/internal/etw"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Hook is a Logrus hook which logs received events to ETW.
|
||||
type Hook struct {
|
||||
provider *etw.Provider
|
||||
}
|
||||
|
||||
// NewHook registers a new ETW provider and returns a hook to log from it.
|
||||
func NewHook(providerName string) (*Hook, error) {
|
||||
hook := Hook{}
|
||||
|
||||
provider, err := etw.NewProvider(providerName, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hook.provider = provider
|
||||
|
||||
return &hook, nil
|
||||
}
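A short sketch of wiring the hook into logrus; the provider name and field are illustrative. Entries below the level enabled by any ETW session are dropped cheaply in Fire (see below):

    package main

    import (
        "github.com/Microsoft/go-winio/pkg/etwlogrus"
        "github.com/sirupsen/logrus"
    )

    func main() {
        hook, err := etwlogrus.NewHook("MyApp-Provider") // illustrative provider name
        if err != nil {
            logrus.Fatal(err)
        }
        defer hook.Close() // unregisters the ETW provider

        logrus.AddHook(hook)
        logrus.WithField("RequestID", 42).Info("request handled")
    }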
|
||||
|
||||
// Levels returns the set of levels that this hook wants to receive log entries
|
||||
// for.
|
||||
func (h *Hook) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.TraceLevel,
|
||||
logrus.DebugLevel,
|
||||
logrus.InfoLevel,
|
||||
logrus.WarnLevel,
|
||||
logrus.ErrorLevel,
|
||||
logrus.FatalLevel,
|
||||
logrus.PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
// Fire receives each Logrus entry as it is logged, and logs it to ETW.
|
||||
func (h *Hook) Fire(e *logrus.Entry) error {
|
||||
level := etw.Level(e.Level)
|
||||
if !h.provider.IsEnabledForLevel(level) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reserve extra space for the message field.
|
||||
fields := make([]etw.FieldOpt, 0, len(e.Data)+1)
|
||||
|
||||
fields = append(fields, etw.StringField("Message", e.Message))
|
||||
|
||||
for k, v := range e.Data {
|
||||
fields = append(fields, getFieldOpt(k, v))
|
||||
}
|
||||
|
||||
// We could try to map Logrus levels to ETW levels, but we would lose some
|
||||
// fidelity as there are fewer ETW levels. So instead we use the level
|
||||
// directly.
|
||||
return h.provider.WriteEvent(
|
||||
"LogrusEntry",
|
||||
etw.WithEventOpts(etw.WithLevel(level)),
|
||||
fields)
|
||||
}
|
||||
|
||||
// Currently, we support logging basic builtin types (int, string, etc), slices
|
||||
// of basic builtin types, error, types derived from the basic types (e.g. "type
|
||||
// foo int"), and structs (recursively logging their fields). We do not support
|
||||
// slices of derived types (e.g. "[]foo").
|
||||
//
|
||||
// For types that we don't support, the value is formatted via fmt.Sprint, and
|
||||
// we also log a message that the type is unsupported along with the formatted
|
||||
// type. The intent of this is to make it easier to see which types are not
|
||||
// supported in traces, so we can evaluate adding support for more types in the
|
||||
// future.
|
||||
func getFieldOpt(k string, v interface{}) etw.FieldOpt {
|
||||
switch v := v.(type) {
|
||||
case bool:
|
||||
return etw.BoolField(k, v)
|
||||
case []bool:
|
||||
return etw.BoolArray(k, v)
|
||||
case string:
|
||||
return etw.StringField(k, v)
|
||||
case []string:
|
||||
return etw.StringArray(k, v)
|
||||
case int:
|
||||
return etw.IntField(k, v)
|
||||
case []int:
|
||||
return etw.IntArray(k, v)
|
||||
case int8:
|
||||
return etw.Int8Field(k, v)
|
||||
case []int8:
|
||||
return etw.Int8Array(k, v)
|
||||
case int16:
|
||||
return etw.Int16Field(k, v)
|
||||
case []int16:
|
||||
return etw.Int16Array(k, v)
|
||||
case int32:
|
||||
return etw.Int32Field(k, v)
|
||||
case []int32:
|
||||
return etw.Int32Array(k, v)
|
||||
case int64:
|
||||
return etw.Int64Field(k, v)
|
||||
case []int64:
|
||||
return etw.Int64Array(k, v)
|
||||
case uint:
|
||||
return etw.UintField(k, v)
|
||||
case []uint:
|
||||
return etw.UintArray(k, v)
|
||||
case uint8:
|
||||
return etw.Uint8Field(k, v)
|
||||
case []uint8:
|
||||
return etw.Uint8Array(k, v)
|
||||
case uint16:
|
||||
return etw.Uint16Field(k, v)
|
||||
case []uint16:
|
||||
return etw.Uint16Array(k, v)
|
||||
case uint32:
|
||||
return etw.Uint32Field(k, v)
|
||||
case []uint32:
|
||||
return etw.Uint32Array(k, v)
|
||||
case uint64:
|
||||
return etw.Uint64Field(k, v)
|
||||
case []uint64:
|
||||
return etw.Uint64Array(k, v)
|
||||
case uintptr:
|
||||
return etw.UintptrField(k, v)
|
||||
case []uintptr:
|
||||
return etw.UintptrArray(k, v)
|
||||
case float32:
|
||||
return etw.Float32Field(k, v)
|
||||
case []float32:
|
||||
return etw.Float32Array(k, v)
|
||||
case float64:
|
||||
return etw.Float64Field(k, v)
|
||||
case []float64:
|
||||
return etw.Float64Array(k, v)
|
||||
case error:
|
||||
return etw.StringField(k, v.Error())
|
||||
default:
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Bool:
|
||||
return getFieldOpt(k, rv.Bool())
|
||||
case reflect.Int:
|
||||
return getFieldOpt(k, int(rv.Int()))
|
||||
case reflect.Int8:
|
||||
return getFieldOpt(k, int8(rv.Int()))
|
||||
case reflect.Int16:
|
||||
return getFieldOpt(k, int16(rv.Int()))
|
||||
case reflect.Int32:
|
||||
return getFieldOpt(k, int32(rv.Int()))
|
||||
case reflect.Int64:
|
||||
return getFieldOpt(k, int64(rv.Int()))
|
||||
case reflect.Uint:
|
||||
return getFieldOpt(k, uint(rv.Uint()))
|
||||
case reflect.Uint8:
|
||||
return getFieldOpt(k, uint8(rv.Uint()))
|
||||
case reflect.Uint16:
|
||||
return getFieldOpt(k, uint16(rv.Uint()))
|
||||
case reflect.Uint32:
|
||||
return getFieldOpt(k, uint32(rv.Uint()))
|
||||
case reflect.Uint64:
|
||||
return getFieldOpt(k, uint64(rv.Uint()))
|
||||
case reflect.Uintptr:
|
||||
return getFieldOpt(k, uintptr(rv.Uint()))
|
||||
case reflect.Float32:
|
||||
return getFieldOpt(k, float32(rv.Float()))
|
||||
case reflect.Float64:
|
||||
return getFieldOpt(k, float64(rv.Float()))
|
||||
case reflect.String:
|
||||
return getFieldOpt(k, rv.String())
|
||||
case reflect.Struct:
|
||||
fields := make([]etw.FieldOpt, 0, rv.NumField())
|
||||
for i := 0; i < rv.NumField(); i++ {
|
||||
field := rv.Field(i)
|
||||
if field.CanInterface() {
|
||||
fields = append(fields, getFieldOpt(k, field.Interface()))
|
||||
}
|
||||
}
|
||||
return etw.Struct(k, fields...)
|
||||
}
|
||||
}
|
||||
|
||||
return etw.StringField(k, fmt.Sprintf("(Unsupported: %T) %v", v, v))
|
||||
}
|
||||
|
||||
// Close cleans up the hook and closes the ETW provider.
|
||||
func (h *Hook) Close() error {
|
||||
return h.provider.Close()
|
||||
}
|
||||
126 vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook_test.go generated vendored Normal file
@@ -0,0 +1,126 @@
|
||||
package etwlogrus
|
||||
|
||||
import (
|
||||
"github.com/Microsoft/go-winio/internal/etw"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func fireEvent(t *testing.T, p *etw.Provider, name string, value interface{}) {
|
||||
if err := p.WriteEvent(
|
||||
name,
|
||||
nil,
|
||||
etw.WithFields(getFieldOpt("Field", value))); err != nil {
|
||||
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// The purpose of this test is to log lots of different field types, to test the
|
||||
// logic that converts them to ETW. Because we don't have a way to
|
||||
// programmatically validate the ETW events, this test has two main purposes: (1)
|
||||
// validate nothing causes a panic while logging (2) allow manual validation that
|
||||
// the data is logged correctly (through a tool like WPA).
|
||||
func TestFieldLogging(t *testing.T) {
|
||||
// Sample WPRP to collect this provider:
|
||||
//
|
||||
// <?xml version="1.0"?>
|
||||
// <WindowsPerformanceRecorder Version="1">
|
||||
// <Profiles>
|
||||
// <EventCollector Id="Collector" Name="MyCollector">
|
||||
// <BufferSize Value="256"/>
|
||||
// <Buffers Value="100"/>
|
||||
// </EventCollector>
|
||||
// <EventProvider Id="HookTest" Name="5e50de03-107c-5a83-74c6-998c4491e7e9"/>
|
||||
// <Profile Id="Test.Verbose.File" Name="Test" Description="Test" LoggingMode="File" DetailLevel="Verbose">
|
||||
// <Collectors>
|
||||
// <EventCollectorId Value="Collector">
|
||||
// <EventProviders>
|
||||
// <EventProviderId Value="HookTest"/>
|
||||
// </EventProviders>
|
||||
// </EventCollectorId>
|
||||
// </Collectors>
|
||||
// </Profile>
|
||||
// </Profiles>
|
||||
// </WindowsPerformanceRecorder>
|
||||
//
|
||||
// Start collection:
|
||||
// wpr -start HookTest.wprp -filemode
|
||||
//
|
||||
// Stop collection:
|
||||
// wpr -stop HookTest.etl
|
||||
p, err := etw.NewProvider("HookTest", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := p.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
fireEvent(t, p, "Bool", true)
|
||||
fireEvent(t, p, "BoolSlice", []bool{true, false, true})
|
||||
fireEvent(t, p, "EmptyBoolSlice", []bool{})
|
||||
fireEvent(t, p, "String", "teststring")
|
||||
fireEvent(t, p, "StringSlice", []string{"sstr1", "sstr2", "sstr3"})
|
||||
fireEvent(t, p, "EmptyStringSlice", []string{})
|
||||
fireEvent(t, p, "Int", int(1))
|
||||
fireEvent(t, p, "IntSlice", []int{2, 3, 4})
|
||||
fireEvent(t, p, "EmptyIntSlice", []int{})
|
||||
fireEvent(t, p, "Int8", int8(5))
|
||||
fireEvent(t, p, "Int8Slice", []int8{6, 7, 8})
|
||||
fireEvent(t, p, "EmptyInt8Slice", []int8{})
|
||||
fireEvent(t, p, "Int16", int16(9))
|
||||
fireEvent(t, p, "Int16Slice", []int16{10, 11, 12})
|
||||
fireEvent(t, p, "EmptyInt16Slice", []int16{})
|
||||
fireEvent(t, p, "Int32", int32(13))
|
||||
fireEvent(t, p, "Int32Slice", []int32{14, 15, 16})
|
||||
fireEvent(t, p, "EmptyInt32Slice", []int32{})
|
||||
fireEvent(t, p, "Int64", int64(17))
|
||||
fireEvent(t, p, "Int64Slice", []int64{18, 19, 20})
|
||||
fireEvent(t, p, "EmptyInt64Slice", []int64{})
|
||||
fireEvent(t, p, "Uint", uint(21))
|
||||
fireEvent(t, p, "UintSlice", []uint{22, 23, 24})
|
||||
fireEvent(t, p, "EmptyUintSlice", []uint{})
|
||||
fireEvent(t, p, "Uint8", uint8(25))
|
||||
fireEvent(t, p, "Uint8Slice", []uint8{26, 27, 28})
|
||||
fireEvent(t, p, "EmptyUint8Slice", []uint8{})
|
||||
fireEvent(t, p, "Uint16", uint16(29))
|
||||
fireEvent(t, p, "Uint16Slice", []uint16{30, 31, 32})
|
||||
fireEvent(t, p, "EmptyUint16Slice", []uint16{})
|
||||
fireEvent(t, p, "Uint32", uint32(33))
|
||||
fireEvent(t, p, "Uint32Slice", []uint32{34, 35, 36})
|
||||
fireEvent(t, p, "EmptyUint32Slice", []uint32{})
|
||||
fireEvent(t, p, "Uint64", uint64(37))
|
||||
fireEvent(t, p, "Uint64Slice", []uint64{38, 39, 40})
|
||||
fireEvent(t, p, "EmptyUint64Slice", []uint64{})
|
||||
fireEvent(t, p, "Uintptr", uintptr(41))
|
||||
fireEvent(t, p, "UintptrSlice", []uintptr{42, 43, 44})
|
||||
fireEvent(t, p, "EmptyUintptrSlice", []uintptr{})
|
||||
fireEvent(t, p, "Float32", float32(45.46))
|
||||
fireEvent(t, p, "Float32Slice", []float32{47.48, 49.50, 51.52})
|
||||
fireEvent(t, p, "EmptyFloat32Slice", []float32{})
|
||||
fireEvent(t, p, "Float64", float64(53.54))
|
||||
fireEvent(t, p, "Float64Slice", []float64{55.56, 57.58, 59.60})
|
||||
fireEvent(t, p, "EmptyFloat64Slice", []float64{})
|
||||
|
||||
type struct1 struct {
|
||||
A float32
|
||||
priv int
|
||||
B []uint
|
||||
}
|
||||
type struct2 struct {
|
||||
A int
|
||||
B int
|
||||
}
|
||||
type struct3 struct {
|
||||
struct2
|
||||
A int
|
||||
B string
|
||||
priv string
|
||||
C struct1
|
||||
D uint16
|
||||
}
|
||||
// Unexported fields, and fields in embedded structs, should not log.
|
||||
fireEvent(t, p, "Struct", struct3{struct2{-1, -2}, 1, "2s", "-3s", struct1{3.4, -4, []uint{5, 6, 7}}, 8})
|
||||
}
|
||||
202 vendor/github.com/Microsoft/go-winio/privilege.go generated vendored Normal file
@@ -0,0 +1,202 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
|
||||
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
|
||||
//sys revertToSelf() (err error) = advapi32.RevertToSelf
|
||||
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
|
||||
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
|
||||
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
|
||||
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
|
||||
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
|
||||
|
||||
const (
|
||||
SE_PRIVILEGE_ENABLED = 2
|
||||
|
||||
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
|
||||
|
||||
SeBackupPrivilege = "SeBackupPrivilege"
|
||||
SeRestorePrivilege = "SeRestorePrivilege"
|
||||
)
|
||||
|
||||
const (
|
||||
securityAnonymous = iota
|
||||
securityIdentification
|
||||
securityImpersonation
|
||||
securityDelegation
|
||||
)
|
||||
|
||||
var (
|
||||
privNames = make(map[string]uint64)
|
||||
privNameMutex sync.Mutex
|
||||
)
|
||||
|
||||
// PrivilegeError represents an error enabling privileges.
|
||||
type PrivilegeError struct {
|
||||
privileges []uint64
|
||||
}
|
||||
|
||||
func (e *PrivilegeError) Error() string {
|
||||
s := ""
|
||||
if len(e.privileges) > 1 {
|
||||
s = "Could not enable privileges "
|
||||
} else {
|
||||
s = "Could not enable privilege "
|
||||
}
|
||||
for i, p := range e.privileges {
|
||||
if i != 0 {
|
||||
s += ", "
|
||||
}
|
||||
s += `"`
|
||||
s += getPrivilegeName(p)
|
||||
s += `"`
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// RunWithPrivilege enables a single privilege for a function call.
|
||||
func RunWithPrivilege(name string, fn func() error) error {
|
||||
return RunWithPrivileges([]string{name}, fn)
|
||||
}
|
||||
|
||||
// RunWithPrivileges enables privileges for a function call.
|
||||
func RunWithPrivileges(names []string, fn func() error) error {
|
||||
privileges, err := mapPrivileges(names)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
token, err := newThreadToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer releaseThreadToken(token)
|
||||
err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fn()
|
||||
}
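A sketch of the intended call pattern, using the SeBackupPrivilege constant declared above; the work inside the closure is hypothetical:

    package main

    import (
        "log"

        winio "github.com/Microsoft/go-winio"
    )

    func main() {
        err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
            // Privileged work goes here; the privilege is enabled only on this
            // OS thread and is reverted when the callback returns.
            return nil
        })
        if err != nil {
            log.Fatal(err) // a *PrivilegeError means the privilege could not be enabled
        }
    }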
|
||||
|
||||
func mapPrivileges(names []string) ([]uint64, error) {
|
||||
var privileges []uint64
|
||||
privNameMutex.Lock()
|
||||
defer privNameMutex.Unlock()
|
||||
for _, name := range names {
|
||||
p, ok := privNames[name]
|
||||
if !ok {
|
||||
err := lookupPrivilegeValue("", name, &p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
privNames[name] = p
|
||||
}
|
||||
privileges = append(privileges, p)
|
||||
}
|
||||
return privileges, nil
|
||||
}
|
||||
|
||||
// EnableProcessPrivileges enables privileges globally for the process.
|
||||
func EnableProcessPrivileges(names []string) error {
|
||||
return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
|
||||
}
|
||||
|
||||
// DisableProcessPrivileges disables privileges globally for the process.
|
||||
func DisableProcessPrivileges(names []string) error {
|
||||
return enableDisableProcessPrivilege(names, 0)
|
||||
}
|
||||
|
||||
func enableDisableProcessPrivilege(names []string, action uint32) error {
|
||||
privileges, err := mapPrivileges(names)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p, _ := windows.GetCurrentProcess()
|
||||
var token windows.Token
|
||||
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer token.Close()
|
||||
return adjustPrivileges(token, privileges, action)
|
||||
}
|
||||
|
||||
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
|
||||
var b bytes.Buffer
|
||||
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
|
||||
for _, p := range privileges {
|
||||
binary.Write(&b, binary.LittleEndian, p)
|
||||
binary.Write(&b, binary.LittleEndian, action)
|
||||
}
|
||||
prevState := make([]byte, b.Len())
|
||||
reqSize := uint32(0)
|
||||
success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
|
||||
if !success {
|
||||
return err
|
||||
}
|
||||
if err == ERROR_NOT_ALL_ASSIGNED {
|
||||
return &PrivilegeError{privileges}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPrivilegeName(luid uint64) string {
|
||||
var nameBuffer [256]uint16
|
||||
bufSize := uint32(len(nameBuffer))
|
||||
err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("<unknown privilege %d>", luid)
|
||||
}
|
||||
|
||||
var displayNameBuffer [256]uint16
|
||||
displayBufSize := uint32(len(displayNameBuffer))
|
||||
var langID uint32
|
||||
err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
|
||||
}
|
||||
|
||||
return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
|
||||
}
|
||||
|
||||
func newThreadToken() (windows.Token, error) {
|
||||
err := impersonateSelf(securityImpersonation)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var token windows.Token
|
||||
err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
|
||||
if err != nil {
|
||||
rerr := revertToSelf()
|
||||
if rerr != nil {
|
||||
panic(rerr)
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func releaseThreadToken(h windows.Token) {
|
||||
err := revertToSelf()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
h.Close()
|
||||
}
|
||||
17 vendor/github.com/Microsoft/go-winio/privileges_test.go generated vendored Normal file
@@ -0,0 +1,17 @@
package winio

import "testing"

func TestRunWithUnavailablePrivilege(t *testing.T) {
	err := RunWithPrivilege("SeCreateTokenPrivilege", func() error { return nil })
	if _, ok := err.(*PrivilegeError); err == nil || !ok {
		t.Fatal("expected PrivilegeError")
	}
}

func TestRunWithPrivileges(t *testing.T) {
	err := RunWithPrivilege("SeShutdownPrivilege", func() error { return nil })
	if err != nil {
		t.Fatal(err)
	}
}
128 vendor/github.com/Microsoft/go-winio/reparse.go generated vendored Normal file
@@ -0,0 +1,128 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
reparseTagMountPoint = 0xA0000003
|
||||
reparseTagSymlink = 0xA000000C
|
||||
)
|
||||
|
||||
type reparseDataBuffer struct {
|
||||
ReparseTag uint32
|
||||
ReparseDataLength uint16
|
||||
Reserved uint16
|
||||
SubstituteNameOffset uint16
|
||||
SubstituteNameLength uint16
|
||||
PrintNameOffset uint16
|
||||
PrintNameLength uint16
|
||||
}
|
||||
|
||||
// ReparsePoint describes a Win32 symlink or mount point.
|
||||
type ReparsePoint struct {
|
||||
Target string
|
||||
IsMountPoint bool
|
||||
}
|
||||
|
||||
// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
|
||||
// mount point reparse point.
|
||||
type UnsupportedReparsePointError struct {
|
||||
Tag uint32
|
||||
}
|
||||
|
||||
func (e *UnsupportedReparsePointError) Error() string {
|
||||
return fmt.Sprintf("unsupported reparse point %x", e.Tag)
|
||||
}
|
||||
|
||||
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
|
||||
// or a mount point.
|
||||
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
|
||||
tag := binary.LittleEndian.Uint32(b[0:4])
|
||||
return DecodeReparsePointData(tag, b[8:])
|
||||
}
|
||||
|
||||
func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
|
||||
isMountPoint := false
|
||||
switch tag {
|
||||
case reparseTagMountPoint:
|
||||
isMountPoint = true
|
||||
case reparseTagSymlink:
|
||||
default:
|
||||
return nil, &UnsupportedReparsePointError{tag}
|
||||
}
|
||||
nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
|
||||
if !isMountPoint {
|
||||
nameOffset += 4
|
||||
}
|
||||
nameLength := binary.LittleEndian.Uint16(b[6:8])
|
||||
name := make([]uint16, nameLength/2)
|
||||
err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
|
||||
}
|
||||
|
||||
func isDriveLetter(c byte) bool {
|
||||
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
|
||||
}
|
||||
|
||||
// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
|
||||
// mount point.
|
||||
func EncodeReparsePoint(rp *ReparsePoint) []byte {
|
||||
// Generate an NT path and determine if this is a relative path.
|
||||
var ntTarget string
|
||||
relative := false
|
||||
if strings.HasPrefix(rp.Target, `\\?\`) {
|
||||
ntTarget = `\??\` + rp.Target[4:]
|
||||
} else if strings.HasPrefix(rp.Target, `\\`) {
|
||||
ntTarget = `\??\UNC\` + rp.Target[2:]
|
||||
} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
|
||||
ntTarget = `\??\` + rp.Target
|
||||
} else {
|
||||
ntTarget = rp.Target
|
||||
relative = true
|
||||
}
|
||||
|
||||
// The paths must be NUL-terminated even though they are counted strings.
|
||||
target16 := utf16.Encode([]rune(rp.Target + "\x00"))
|
||||
ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
|
||||
|
||||
size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
|
||||
size += len(ntTarget16)*2 + len(target16)*2
|
||||
|
||||
tag := uint32(reparseTagMountPoint)
|
||||
if !rp.IsMountPoint {
|
||||
tag = reparseTagSymlink
|
||||
size += 4 // Add room for symlink flags
|
||||
}
|
||||
|
||||
data := reparseDataBuffer{
|
||||
ReparseTag: tag,
|
||||
ReparseDataLength: uint16(size),
|
||||
SubstituteNameOffset: 0,
|
||||
SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
|
||||
PrintNameOffset: uint16(len(ntTarget16) * 2),
|
||||
PrintNameLength: uint16((len(target16) - 1) * 2),
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
binary.Write(&b, binary.LittleEndian, &data)
|
||||
if !rp.IsMountPoint {
|
||||
flags := uint32(0)
|
||||
if relative {
|
||||
flags |= 1
|
||||
}
|
||||
binary.Write(&b, binary.LittleEndian, flags)
|
||||
}
|
||||
|
||||
binary.Write(&b, binary.LittleEndian, ntTarget16)
|
||||
binary.Write(&b, binary.LittleEndian, target16)
|
||||
return b.Bytes()
|
||||
}
|
||||
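A minimal round-trip sketch of the encode/decode pair above, using the exported winio.EncodeReparsePoint / winio.DecodeReparsePoint shown in this file (the target path is illustrative; DecodeReparsePoint recovers the print name from the buffer EncodeReparsePoint produced):

package main

import (
	"fmt"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	rp := &winio.ReparsePoint{Target: `C:\mount\target`, IsMountPoint: true}

	// Encode to a REPARSE_DATA_BUFFER and decode it again.
	buf := winio.EncodeReparsePoint(rp)
	back, err := winio.DecodeReparsePoint(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Target, back.IsMountPoint) // C:\mount\target true
}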
98 vendor/github.com/Microsoft/go-winio/sd.go generated vendored Normal file
@@ -0,0 +1,98 @@
// +build windows

package winio

import (
	"syscall"
	"unsafe"
)

//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
//sys localFree(mem uintptr) = LocalFree
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength

const (
	cERROR_NONE_MAPPED = syscall.Errno(1332)
)

type AccountLookupError struct {
	Name string
	Err  error
}

func (e *AccountLookupError) Error() string {
	if e.Name == "" {
		return "lookup account: empty account name specified"
	}
	var s string
	switch e.Err {
	case cERROR_NONE_MAPPED:
		s = "not found"
	default:
		s = e.Err.Error()
	}
	return "lookup account " + e.Name + ": " + s
}

type SddlConversionError struct {
	Sddl string
	Err  error
}

func (e *SddlConversionError) Error() string {
	return "convert " + e.Sddl + ": " + e.Err.Error()
}

// LookupSidByName looks up the SID of an account by name
func LookupSidByName(name string) (sid string, err error) {
	if name == "" {
		return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
	}

	var sidSize, sidNameUse, refDomainSize uint32
	err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
	if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
		return "", &AccountLookupError{name, err}
	}
	sidBuffer := make([]byte, sidSize)
	refDomainBuffer := make([]uint16, refDomainSize)
	err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	var strBuffer *uint16
	err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
	localFree(uintptr(unsafe.Pointer(strBuffer)))
	return sid, nil
}

func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
	var sdBuffer uintptr
	err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
	if err != nil {
		return nil, &SddlConversionError{sddl, err}
	}
	defer localFree(sdBuffer)
	sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
	copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
	return sd, nil
}

func SecurityDescriptorToSddl(sd []byte) (string, error) {
	var sddl *uint16
	// The returned string length seems to include an arbitrary number of terminating NULs.
	// Don't use it.
	err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
	if err != nil {
		return "", err
	}
	defer localFree(uintptr(unsafe.Pointer(sddl)))
	return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
}
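A minimal usage sketch of the exported helpers above (winio.LookupSidByName and the SDDL converters; the "Everyone" lookup mirrors the test below, while the SDDL string is an illustrative example, not taken from this repository):

package main

import (
	"fmt"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Resolve a well-known account name to its SID string.
	sid, err := winio.LookupSidByName("Everyone")
	if err != nil {
		panic(err)
	}
	fmt.Println(sid) // S-1-1-0

	// Round-trip an SDDL string through its binary security-descriptor form.
	sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;WD)")
	if err != nil {
		panic(err)
	}
	back, err := winio.SecurityDescriptorToSddl(sd)
	if err != nil {
		panic(err)
	}
	fmt.Println(back)
}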
26 vendor/github.com/Microsoft/go-winio/sd_test.go generated vendored Normal file
@@ -0,0 +1,26 @@
package winio

import "testing"

func TestLookupInvalidSid(t *testing.T) {
	_, err := LookupSidByName(".\\weoifjdsklfj")
	aerr, ok := err.(*AccountLookupError)
	if !ok || aerr.Err != cERROR_NONE_MAPPED {
		t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err)
	}
}

func TestLookupValidSid(t *testing.T) {
	sid, err := LookupSidByName("Everyone")
	if err != nil || sid != "S-1-1-0" {
		t.Fatalf("expected S-1-1-0, got %s, %s", sid, err)
	}
}

func TestLookupEmptyNameFails(t *testing.T) {
	_, err := LookupSidByName(".\\weoifjdsklfj")
	aerr, ok := err.(*AccountLookupError)
	if !ok || aerr.Err != cERROR_NONE_MAPPED {
		t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err)
	}
}
3 vendor/github.com/Microsoft/go-winio/syscall.go generated vendored Normal file
@@ -0,0 +1,3 @@
package winio

//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
25 vendor/github.com/Microsoft/go-winio/tools/etw-provider-gen/main.go generated vendored Normal file
@@ -0,0 +1,25 @@
package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/Microsoft/go-winio/internal/etw"
)

func main() {
	var pn = flag.String("provider-name", "", "The human readable ETW provider name to be converted into GUID format")
	flag.Parse()
	if pn == nil || *pn == "" {
		fmt.Fprint(os.Stderr, "--provider-name is required")
		os.Exit(1)
	}
	p, err := etw.NewProvider(*pn, nil)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to convert provider-name: '%s' with err: '%s'", *pn, err)
		os.Exit(1)
	}
	defer p.Close()
	fmt.Fprintf(os.Stdout, "%s", p)
}
901 vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go generated vendored Normal file
@@ -0,0 +1,901 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hard-coding unicode mode for VHD library.

// +build ignore

/*
mksyscall_windows generates windows system call bodies

It parses all files specified on command line containing function
prototypes (like syscall_windows.go) and prints system call bodies
to standard output.

The prototypes are marked by lines beginning with "//sys" and read
like func declarations if //sys is replaced by func, but:

* The parameter lists must give a name for each argument. This
  includes return parameters.

* The parameter lists must give a type for each argument:
  the (x, y, z int) shorthand is not allowed.

* If the return parameter is an error number, it must be named err.

* If go func name needs to be different from its winapi dll name,
  the winapi name could be specified at the end, after "=" sign, like
  //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA

* Each function that returns err needs to supply a condition, that
  return value of winapi will be tested against to detect failure.
  This would set err to windows "last-error", otherwise it will be nil.
  The value can be provided at end of //sys declaration, like
  //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
  and is [failretval==0] by default.

Usage:
	mksyscall_windows [flags] [path ...]

The flags are:
	-output
		Specify output file name (outputs to console if blank).
	-trace
		Generate print statement after every syscall.
*/
package main

import (
	"bufio"
	"bytes"
	"errors"
	"flag"
	"fmt"
	"go/format"
	"go/parser"
	"go/token"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"text/template"
)

var (
	filename       = flag.String("output", "", "output file name (standard output if omitted)")
	printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
	systemDLL      = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
)

func trim(s string) string {
	return strings.Trim(s, " \t")
}

var packageName string

func packagename() string {
	return packageName
}

func syscalldot() string {
	if packageName == "syscall" {
		return ""
	}
	return "syscall."
}

// Param is function parameter
type Param struct {
	Name      string
	Type      string
	fn        *Fn
	tmpVarIdx int
}

// tmpVar returns temp variable name that will be used to represent p during syscall.
func (p *Param) tmpVar() string {
	if p.tmpVarIdx < 0 {
		p.tmpVarIdx = p.fn.curTmpVarIdx
		p.fn.curTmpVarIdx++
	}
	return fmt.Sprintf("_p%d", p.tmpVarIdx)
}

// BoolTmpVarCode returns source code for bool temp variable.
func (p *Param) BoolTmpVarCode() string {
	const code = `var %s uint32
	if %s {
		%s = 1
	} else {
		%s = 0
	}`
	tmp := p.tmpVar()
	return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
}

// SliceTmpVarCode returns source code for slice temp variable.
func (p *Param) SliceTmpVarCode() string {
	const code = `var %s *%s
	if len(%s) > 0 {
		%s = &%s[0]
	}`
	tmp := p.tmpVar()
	return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
}

// StringTmpVarCode returns source code for string temp variable.
func (p *Param) StringTmpVarCode() string {
	errvar := p.fn.Rets.ErrorVarName()
	if errvar == "" {
		errvar = "_"
	}
	tmp := p.tmpVar()
	const code = `var %s %s
	%s, %s = %s(%s)`
	s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
	if errvar == "-" {
		return s
	}
	const morecode = `
	if %s != nil {
		return
	}`
	return s + fmt.Sprintf(morecode, errvar)
}

// TmpVarCode returns source code for temp variable.
func (p *Param) TmpVarCode() string {
	switch {
	case p.Type == "bool":
		return p.BoolTmpVarCode()
	case strings.HasPrefix(p.Type, "[]"):
		return p.SliceTmpVarCode()
	default:
		return ""
	}
}

// TmpVarHelperCode returns source code for helper's temp variable.
func (p *Param) TmpVarHelperCode() string {
	if p.Type != "string" {
		return ""
	}
	return p.StringTmpVarCode()
}

// SyscallArgList returns source code fragments representing p parameter
// in syscall. Slices are translated into 2 syscall parameters: pointer to
// the first element and length.
func (p *Param) SyscallArgList() []string {
	t := p.HelperType()
	var s string
	switch {
	case t[0] == '*':
		s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
	case t == "bool":
		s = p.tmpVar()
	case strings.HasPrefix(t, "[]"):
		return []string{
			fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
			fmt.Sprintf("uintptr(len(%s))", p.Name),
		}
	default:
		s = p.Name
	}
	return []string{fmt.Sprintf("uintptr(%s)", s)}
}

// IsError determines if p parameter is used to return error.
func (p *Param) IsError() bool {
	return p.Name == "err" && p.Type == "error"
}

// HelperType returns type of parameter p used in helper function.
func (p *Param) HelperType() string {
	if p.Type == "string" {
		return p.fn.StrconvType()
	}
	return p.Type
}

// join concatenates parameters ps into a string with sep separator.
// Each parameter is converted into string by applying fn to it
// before conversion.
func join(ps []*Param, fn func(*Param) string, sep string) string {
	if len(ps) == 0 {
		return ""
	}
	a := make([]string, 0)
	for _, p := range ps {
		a = append(a, fn(p))
	}
	return strings.Join(a, sep)
}

// Rets describes function return parameters.
type Rets struct {
	Name         string
	Type         string
	ReturnsError bool
	FailCond     string
}

// ErrorVarName returns error variable name for r.
func (r *Rets) ErrorVarName() string {
	if r.ReturnsError {
		return "err"
	}
	if r.Type == "error" {
		return r.Name
	}
	return ""
}

// ToParams converts r into slice of *Param.
func (r *Rets) ToParams() []*Param {
	ps := make([]*Param, 0)
	if len(r.Name) > 0 {
		ps = append(ps, &Param{Name: r.Name, Type: r.Type})
	}
	if r.ReturnsError {
		ps = append(ps, &Param{Name: "err", Type: "error"})
	}
	return ps
}

// List returns source code of syscall return parameters.
func (r *Rets) List() string {
	s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
	if len(s) > 0 {
		s = "(" + s + ")"
	}
	return s
}

// PrintList returns source code of trace printing part correspondent
// to syscall return values.
func (r *Rets) PrintList() string {
	return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}

// SetReturnValuesCode returns source code that accepts syscall return values.
func (r *Rets) SetReturnValuesCode() string {
	if r.Name == "" && !r.ReturnsError {
		return ""
	}
	retvar := "r0"
	if r.Name == "" {
		retvar = "r1"
	}
	errvar := "_"
	if r.ReturnsError {
		errvar = "e1"
	}
	return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
}

func (r *Rets) useLongHandleErrorCode(retvar string) string {
	const code = `if %s {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = %sEINVAL
		}
	}`
	cond := retvar + " == 0"
	if r.FailCond != "" {
		cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
	}
	return fmt.Sprintf(code, cond, syscalldot())
}

// SetErrorCode returns source code that sets return parameters.
func (r *Rets) SetErrorCode() string {
	const code = `if r0 != 0 {
		%s = %sErrno(r0)
	}`
	if r.Name == "" && !r.ReturnsError {
		return ""
	}
	if r.Name == "" {
		return r.useLongHandleErrorCode("r1")
	}
	if r.Type == "error" {
		return fmt.Sprintf(code, r.Name, syscalldot())
	}
	s := ""
	switch {
	case r.Type[0] == '*':
		s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
	case r.Type == "bool":
		s = fmt.Sprintf("%s = r0 != 0", r.Name)
	default:
		s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
	}
	if !r.ReturnsError {
		return s
	}
	return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
}

// Fn describes syscall function.
type Fn struct {
	Name        string
	Params      []*Param
	Rets        *Rets
	PrintTrace  bool
	dllname     string
	dllfuncname string
	src         string
	// TODO: get rid of this field and just use parameter index instead
	curTmpVarIdx int // ensure tmp variables have unique names
}

// extractParams parses s to extract function parameters.
func extractParams(s string, f *Fn) ([]*Param, error) {
	s = trim(s)
	if s == "" {
		return nil, nil
	}
	a := strings.Split(s, ",")
	ps := make([]*Param, len(a))
	for i := range ps {
		s2 := trim(a[i])
		b := strings.Split(s2, " ")
		if len(b) != 2 {
			b = strings.Split(s2, "\t")
			if len(b) != 2 {
				return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
			}
		}
		ps[i] = &Param{
			Name:      trim(b[0]),
			Type:      trim(b[1]),
			fn:        f,
			tmpVarIdx: -1,
		}
	}
	return ps, nil
}

// extractSection extracts text out of string s starting after start
// and ending just before end. found return value will indicate success,
// and prefix, body and suffix will contain correspondent parts of string s.
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
	s = trim(s)
	if strings.HasPrefix(s, string(start)) {
		// no prefix
		body = s[1:]
	} else {
		a := strings.SplitN(s, string(start), 2)
		if len(a) != 2 {
			return "", "", s, false
		}
		prefix = a[0]
		body = a[1]
	}
	a := strings.SplitN(body, string(end), 2)
	if len(a) != 2 {
		return "", "", "", false
	}
	return prefix, a[0], a[1], true
}

// newFn parses string s and return created function Fn.
func newFn(s string) (*Fn, error) {
	s = trim(s)
	f := &Fn{
		Rets:       &Rets{},
		src:        s,
		PrintTrace: *printTraceFlag,
	}
	// function name and args
	prefix, body, s, found := extractSection(s, '(', ')')
	if !found || prefix == "" {
		return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
	}
	f.Name = prefix
	var err error
	f.Params, err = extractParams(body, f)
	if err != nil {
		return nil, err
	}
	// return values
	_, body, s, found = extractSection(s, '(', ')')
	if found {
		r, err := extractParams(body, f)
		if err != nil {
			return nil, err
		}
		switch len(r) {
		case 0:
		case 1:
			if r[0].IsError() {
				f.Rets.ReturnsError = true
			} else {
				f.Rets.Name = r[0].Name
				f.Rets.Type = r[0].Type
			}
		case 2:
			if !r[1].IsError() {
				return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
			}
			f.Rets.ReturnsError = true
			f.Rets.Name = r[0].Name
			f.Rets.Type = r[0].Type
		default:
			return nil, errors.New("Too many return values in \"" + f.src + "\"")
		}
	}
	// fail condition
	_, body, s, found = extractSection(s, '[', ']')
	if found {
		f.Rets.FailCond = body
	}
	// dll and dll function names
	s = trim(s)
	if s == "" {
		return f, nil
	}
	if !strings.HasPrefix(s, "=") {
		return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
	}
	s = trim(s[1:])
	a := strings.Split(s, ".")
	switch len(a) {
	case 1:
		f.dllfuncname = a[0]
	case 2:
		f.dllname = a[0]
		f.dllfuncname = a[1]
	default:
		return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
	}
	return f, nil
}

// DLLName returns DLL name for function f.
func (f *Fn) DLLName() string {
	if f.dllname == "" {
		return "kernel32"
	}
	return f.dllname
}

// DLLFuncName returns DLL function name for function f.
func (f *Fn) DLLFuncName() string {
	if f.dllfuncname == "" {
		return f.Name
	}
	return f.dllfuncname
}

// ParamList returns source code for function f parameters.
func (f *Fn) ParamList() string {
	return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
}

// HelperParamList returns source code for helper function f parameters.
func (f *Fn) HelperParamList() string {
	return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
}

// ParamPrintList returns source code of trace printing part correspondent
// to syscall input parameters.
func (f *Fn) ParamPrintList() string {
	return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}

// ParamCount return number of syscall parameters for function f.
func (f *Fn) ParamCount() int {
	n := 0
	for _, p := range f.Params {
		n += len(p.SyscallArgList())
	}
	return n
}

// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
// to use. It returns parameter count for correspondent SyscallX function.
func (f *Fn) SyscallParamCount() int {
	n := f.ParamCount()
	switch {
	case n <= 3:
		return 3
	case n <= 6:
		return 6
	case n <= 9:
		return 9
	case n <= 12:
		return 12
	case n <= 15:
		return 15
	default:
		panic("too many arguments to system call")
	}
}

// Syscall determines which SyscallX function to use for function f.
func (f *Fn) Syscall() string {
	c := f.SyscallParamCount()
	if c == 3 {
		return syscalldot() + "Syscall"
	}
	return syscalldot() + "Syscall" + strconv.Itoa(c)
}

// SyscallParamList returns source code for SyscallX parameters for function f.
func (f *Fn) SyscallParamList() string {
	a := make([]string, 0)
	for _, p := range f.Params {
		a = append(a, p.SyscallArgList()...)
	}
	for len(a) < f.SyscallParamCount() {
		a = append(a, "0")
	}
	return strings.Join(a, ", ")
}

// HelperCallParamList returns source code of call into function f helper.
func (f *Fn) HelperCallParamList() string {
	a := make([]string, 0, len(f.Params))
	for _, p := range f.Params {
		s := p.Name
		if p.Type == "string" {
			s = p.tmpVar()
		}
		a = append(a, s)
	}
	return strings.Join(a, ", ")
}

// IsUTF16 is true, if f is W (utf16) function. It is false
// for all A (ascii) functions.
func (f *Fn) IsUTF16() bool {
	return true
}

// StrconvFunc returns name of Go string to OS string function for f.
func (f *Fn) StrconvFunc() string {
	if f.IsUTF16() {
		return syscalldot() + "UTF16PtrFromString"
	}
	return syscalldot() + "BytePtrFromString"
}

// StrconvType returns Go type name used for OS string for f.
func (f *Fn) StrconvType() string {
	if f.IsUTF16() {
		return "*uint16"
	}
	return "*byte"
}

// HasStringParam is true, if f has at least one string parameter.
// Otherwise it is false.
func (f *Fn) HasStringParam() bool {
	for _, p := range f.Params {
		if p.Type == "string" {
			return true
		}
	}
	return false
}

// HelperName returns name of function f helper.
func (f *Fn) HelperName() string {
	if !f.HasStringParam() {
		return f.Name
	}
	return "_" + f.Name
}

// Source files and functions.
type Source struct {
	Funcs           []*Fn
	Files           []string
	StdLibImports   []string
	ExternalImports []string
}

func (src *Source) Import(pkg string) {
	src.StdLibImports = append(src.StdLibImports, pkg)
	sort.Strings(src.StdLibImports)
}

func (src *Source) ExternalImport(pkg string) {
	src.ExternalImports = append(src.ExternalImports, pkg)
	sort.Strings(src.ExternalImports)
}

// ParseFiles parses files listed in fs and extracts all syscall
// functions listed in sys comments. It returns source files
// and functions collection *Source if successful.
func ParseFiles(fs []string) (*Source, error) {
	src := &Source{
		Funcs: make([]*Fn, 0),
		Files: make([]string, 0),
		StdLibImports: []string{
			"unsafe",
		},
		ExternalImports: make([]string, 0),
	}
	for _, file := range fs {
		if err := src.ParseFile(file); err != nil {
			return nil, err
		}
	}
	return src, nil
}

// DLLs return dll names for a source set src.
func (src *Source) DLLs() []string {
	uniq := make(map[string]bool)
	r := make([]string, 0)
	for _, f := range src.Funcs {
		name := f.DLLName()
		if _, found := uniq[name]; !found {
			uniq[name] = true
			r = append(r, name)
		}
	}
	return r
}

// ParseFile adds additional file path to a source set src.
func (src *Source) ParseFile(path string) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()

	s := bufio.NewScanner(file)
	for s.Scan() {
		t := trim(s.Text())
		if len(t) < 7 {
			continue
		}
		if !strings.HasPrefix(t, "//sys") {
			continue
		}
		t = t[5:]
		if !(t[0] == ' ' || t[0] == '\t') {
			continue
		}
		f, err := newFn(t[1:])
		if err != nil {
			return err
		}
		src.Funcs = append(src.Funcs, f)
	}
	if err := s.Err(); err != nil {
		return err
	}
	src.Files = append(src.Files, path)

	// get package name
	fset := token.NewFileSet()
	_, err = file.Seek(0, 0)
	if err != nil {
		return err
	}
	pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
	if err != nil {
		return err
	}
	packageName = pkg.Name.Name

	return nil
}

// IsStdRepo returns true if src is part of standard library.
func (src *Source) IsStdRepo() (bool, error) {
	if len(src.Files) == 0 {
		return false, errors.New("no input files provided")
	}
	abspath, err := filepath.Abs(src.Files[0])
	if err != nil {
		return false, err
	}
	goroot := runtime.GOROOT()
	if runtime.GOOS == "windows" {
		abspath = strings.ToLower(abspath)
		goroot = strings.ToLower(goroot)
	}
	sep := string(os.PathSeparator)
	if !strings.HasSuffix(goroot, sep) {
		goroot += sep
	}
	return strings.HasPrefix(abspath, goroot), nil
}

// Generate output source file from a source set src.
func (src *Source) Generate(w io.Writer) error {
	const (
		pkgStd         = iota // any package in std library
		pkgXSysWindows        // x/sys/windows package
		pkgOther
	)
	isStdRepo, err := src.IsStdRepo()
	if err != nil {
		return err
	}
	var pkgtype int
	switch {
	case isStdRepo:
		pkgtype = pkgStd
	case packageName == "windows":
		// TODO: this needs better logic than just using package name
		pkgtype = pkgXSysWindows
	default:
		pkgtype = pkgOther
	}
	if *systemDLL {
		switch pkgtype {
		case pkgStd:
			src.Import("internal/syscall/windows/sysdll")
		case pkgXSysWindows:
		default:
			src.ExternalImport("golang.org/x/sys/windows")
		}
	}
	if packageName != "syscall" {
		src.Import("syscall")
	}
	funcMap := template.FuncMap{
		"packagename": packagename,
		"syscalldot":  syscalldot,
		"newlazydll": func(dll string) string {
			arg := "\"" + dll + ".dll\""
			if !*systemDLL {
				return syscalldot() + "NewLazyDLL(" + arg + ")"
			}
			switch pkgtype {
			case pkgStd:
				return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
			case pkgXSysWindows:
				return "NewLazySystemDLL(" + arg + ")"
			default:
				return "windows.NewLazySystemDLL(" + arg + ")"
			}
		},
	}
	t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
	err = t.Execute(w, src)
	if err != nil {
		return errors.New("Failed to execute template: " + err.Error())
	}
	return nil
}

func usage() {
	fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
	flag.PrintDefaults()
	os.Exit(1)
}

func main() {
	flag.Usage = usage
	flag.Parse()
	if len(flag.Args()) <= 0 {
		fmt.Fprintf(os.Stderr, "no files to parse provided\n")
		usage()
	}

	src, err := ParseFiles(flag.Args())
	if err != nil {
		log.Fatal(err)
	}

	var buf bytes.Buffer
	if err := src.Generate(&buf); err != nil {
		log.Fatal(err)
	}

	data, err := format.Source(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	if *filename == "" {
		_, err = os.Stdout.Write(data)
	} else {
		err = ioutil.WriteFile(*filename, data, 0644)
	}
	if err != nil {
		log.Fatal(err)
	}
}

// TODO: use println instead to print in the following template
const srcTemplate = `

{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT

package {{packagename}}

import (
{{range .StdLibImports}}"{{.}}"
{{end}}

{{range .ExternalImports}}"{{.}}"
{{end}}
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e {{syscalldot}}Errno) error {
	switch e {
	case 0:
		return nil
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values seen on Windows. (perhaps when running
	// all.bat?)
	return e
}

var (
{{template "dlls" .}}
{{template "funcnames" .}})
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
{{end}}

{{/* help functions */}}

{{define "dlls"}}{{range .DLLs}}	mod{{.}} = {{newlazydll .}}
{{end}}{{end}}

{{define "funcnames"}}{{range .Funcs}}	proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}")
{{end}}{{end}}

{{define "helperbody"}}
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
{{template "helpertmpvars" .}}	return {{.HelperName}}({{.HelperCallParamList}})
}
{{end}}

{{define "funcbody"}}
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
{{template "tmpvars" .}}	{{template "syscall" .}}
{{template "seterror" .}}{{template "printtrace" .}}	return
}
{{end}}

{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}}	{{.TmpVarHelperCode}}
{{end}}{{end}}{{end}}

{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}}	{{.TmpVarCode}}
{{end}}{{end}}{{end}}

{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}

{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}

{{define "seterror"}}{{if .Rets.SetErrorCode}}	{{.Rets.SetErrorCode}}
{{end}}{{end}}

{{define "printtrace"}}{{if .PrintTrace}}	print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
{{end}}{{end}}

`
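The doc comment at the top of this generator describes the //sys prototype syntax it consumes. To make the input/output relationship concrete, here is the prototype that vhd.go (below) declares and, roughly, the wrapper the generator emits for it, following the srcTemplate above; compare the generated zvhd.go further down:

// Input prototype, as written in vhd.go:
//sys detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = VirtDisk.DetachVirtualDisk

// Approximate generated wrapper:
func detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) {
	r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(flags), uintptr(providerSpecificFlags))
	if r1 != 0 { // the [failretval != 0] condition from the prototype
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}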
108 vendor/github.com/Microsoft/go-winio/vhd/vhd.go generated vendored Normal file
@@ -0,0 +1,108 @@
// +build windows

package vhd

import "syscall"

//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go

//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.OpenVirtualDisk
//sys detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = VirtDisk.DetachVirtualDisk

type virtualStorageType struct {
	DeviceID uint32
	VendorID [16]byte
}

const virtualDiskAccessNONE uint32 = 0
const virtualDiskAccessATTACHRO uint32 = 65536
const virtualDiskAccessATTACHRW uint32 = 131072
const virtualDiskAccessDETACH uint32 = 262144
const virtualDiskAccessGETINFO uint32 = 524288
const virtualDiskAccessCREATE uint32 = 1048576
const virtualDiskAccessMETAOPS uint32 = 2097152
const virtualDiskAccessREAD uint32 = 851968
const virtualDiskAccessALL uint32 = 4128768
const virtualDiskAccessWRITABLE uint32 = 3276800

const createVirtualDiskFlagNone uint32 = 0
const createVirtualDiskFlagFullPhysicalAllocation uint32 = 1
const createVirtualDiskFlagPreventWritesToSourceDisk uint32 = 2
const createVirtualDiskFlagDoNotCopyMetadataFromParent uint32 = 4

type version2 struct {
	UniqueID                 [16]byte // GUID
	MaximumSize              uint64
	BlockSizeInBytes         uint32
	SectorSizeInBytes        uint32
	ParentPath               *uint16 // string
	SourcePath               *uint16 // string
	OpenFlags                uint32
	ParentVirtualStorageType virtualStorageType
	SourceVirtualStorageType virtualStorageType
	ResiliencyGUID           [16]byte // GUID
}

type createVirtualDiskParameters struct {
	Version  uint32 // Must always be set to 2
	Version2 version2
}

// CreateVhdx will create a simple vhdx file at the given path using default values.
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
	var defaultType virtualStorageType

	parameters := createVirtualDiskParameters{
		Version: 2,
		Version2: version2{
			MaximumSize:      uint64(maxSizeInGb) * 1024 * 1024 * 1024,
			BlockSizeInBytes: blockSizeInMb * 1024 * 1024,
		},
	}

	var handle syscall.Handle

	if err := createVirtualDisk(
		&defaultType,
		path,
		virtualDiskAccessNONE,
		nil,
		createVirtualDiskFlagNone,
		0,
		&parameters,
		nil,
		&handle); err != nil {
		return err
	}

	if err := syscall.CloseHandle(handle); err != nil {
		return err
	}

	return nil
}

// DetachVhd detaches a VHD attached at the given path.
func DetachVhd(path string) error {
	var (
		defaultType virtualStorageType
		handle      syscall.Handle
	)

	if err := openVirtualDisk(
		&defaultType,
		path,
		virtualDiskAccessDETACH,
		0,
		nil,
		&handle); err != nil {
		return err
	}
	defer syscall.CloseHandle(handle)

	if err := detachVirtualDisk(handle, 0, 0); err != nil {
		return err
	}
	return nil
}
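A minimal sketch of calling the two exported helpers above from the vendored package path github.com/Microsoft/go-winio/vhd (the file path and sizes are illustrative):

package main

import (
	"log"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	// Create a 10 GB VHDX with 1 MB blocks at the given path.
	if err := vhd.CreateVhdx(`C:\temp\disk.vhdx`, 10, 1); err != nil {
		log.Fatal(err)
	}

	// Later, detach it if it has been attached.
	if err := vhd.DetachVhd(`C:\temp\disk.vhdx`); err != nil {
		log.Fatal(err)
	}
}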
99 vendor/github.com/Microsoft/go-winio/vhd/zvhd.go generated vendored Normal file
@@ -0,0 +1,99 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT

package vhd

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return nil
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values seen on Windows. (perhaps when running
	// all.bat?)
	return e
}

var (
	modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll")

	procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk")
	procOpenVirtualDisk   = modVirtDisk.NewProc("OpenVirtualDisk")
	procDetachVirtualDisk = modVirtDisk.NewProc("DetachVirtualDisk")
)

func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(path)
	if err != nil {
		return
	}
	return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle)
}

func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle)))
	if r1 != 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}

func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(path)
	if err != nil {
		return
	}
	return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle)
}

func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
	if r1 != 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}

func detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) {
	r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(flags), uintptr(providerSpecificFlags))
	if r1 != 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
Some files were not shown because too many files have changed in this diff.