Mirror of https://github.com/prometheus-community/windows_exporter.git (synced 2026-02-08 22:16:38 +00:00)

Compare commits: v0.5.0 ... v0.7.999-p (46 commits)
Commits (SHA1):
daa6f3d111, 85fdfb44b8, 33879449a2, 462a136673, d5e39892cf, ec0d863c29,
afc3655a41, e25e96a62e, 23d92cfcae, 1258703f23, 8841091f9c, 517cd3b04b,
9daa8c8775, e04d3f414d, 4c69ed1610, a171401f57, e24e0dc9f5, 0eab86c731,
13c68634ce, 73ad1ba960, 0121fd6471, 93904954f4, f2462b26c8, 7e05621b26,
76ddad34b8, 2053dea3ac, 35b81dcdd0, 39b0000514, 76ec763c42, 7ccc47cc51,
ad29ac0792, d58ce114d9, 5f9dfcc378, f4e5bc3d29, f4362c5987, f691b48304,
d12d31a17f, 48d23cfb12, 17039b8206, 2993552e19, 5d4cafc0a1, a70c57ffd1,
b2cb04834a, f27fdbbbf5, 7dda8eba03, 080f80eb26
.gitignore (vendored), 1 line changed:
@@ -2,3 +2,4 @@
VERSION
*.swp
*.un~
output/

@@ -12,3 +12,7 @@ build:
tarball:
files:
- LICENSE
crossbuild:
platforms:
- windows/amd64
- windows/386
Gopkg.lock (generated), 6 lines changed:
@@ -37,12 +37,12 @@
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"

[[projects]]
digest = "1:ec6271918b59b872a2d25e374569a4f75f1839d91e4191470c297b7eaaaf7641"
digest = "1:f9adc21a937e5da643ea14a3488cb7506788876737a5e205394e508627a6eec8"
name = "github.com/dimchansky/utfbom"
packages = ["."]
pruneopts = "NUT"
revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c"
version = "v1.0.0"
revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c"
version = "v1.1.0"

[[projects]]
digest = "1:cb4e216bd9f58866f42dc65893455b24f879b026fdaa1ecc3aafff625fdb5a66"
Makefile, 18 lines changed:
@@ -1,5 +1,19 @@
fmt:
gofmt -l -w -s .
export GOOS=windows

build:
promu build -v

test:
go test -v ./...

lint:
gometalinter --vendor --config gometalinter.config ./...

fmt:
gofmt -l -w -s .

crossbuild:
# The prometheus/golang-builder image for promu crossbuild doesn't exist
# on Windows, so for now, we'll just build twice
GOARCH=amd64 promu build --prefix=output/amd64
GOARCH=386 promu build --prefix=output/386
README.md, 44 lines changed:
@@ -9,22 +9,30 @@ Prometheus exporter for Windows machines, using the WMI (Windows Management Inst

Name | Description | Enabled by default
---------|-------------|--------------------
[ad](docs/collector.ad.md) | [Win32_PerfRawData_DirectoryServices_DirectoryServices](https://msdn.microsoft.com/en-us/library/ms803980.aspx) Active Directory |
[cpu](docs/collector.cpu.md) | [Win32_PerfRawData_PerfOS_Processor](https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx) metrics (cpu usage) | ✓
[cs](docs/collector.cs.md) | [Win32_ComputerSystem](https://msdn.microsoft.com/en-us/library/aa394102) metrics (system properties, num cpus/total memory) | ✓
[dns](docs/collector.dns.md) | [Win32_PerfRawData_DNS_DNS](https://technet.microsoft.com/en-us/library/cc977686.aspx) metrics (DNS Server) |
[hyperv](docs/collector.hyperv.md) | Performance counters for Hyper-V hosts |
[iis](docs/collector.iis.md) | [Win32_PerfRawData_W3SVC_WebService](https://msdn.microsoft.com/en-us/library/aa394345) IIS metrics |
[logical_disk](docs/collector.logical_disk.md) | [Win32_PerfRawData_PerfDisk_LogicalDisk](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71)) metrics (disk I/O) | ✓
[net](docs/collector.net.md) | [Win32_PerfRawData_Tcpip_NetworkInterface](https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)) metrics (network interface I/O) | ✓
[ad](docs/collector.ad.md) | Active Directory Domain Services |
[cpu](docs/collector.cpu.md) | CPU usage | ✓
[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | ✓
[dns](docs/collector.dns.md) | DNS Server |
[hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
[iis](docs/collector.iis.md) | IIS sites and applications |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓
[memory](docs/collector.memory.md) | Memory usage metrics |
[msmq](docs/collector.msmq.md) | [Win32_PerfRawData_MSMQ_MSMQQueue](http://wutils.com/wmi/root/cimv2/win32_perfrawdata_msmq_msmqqueue/) metrics (MSMQ/journal count) |
[mssql](docs/collector.mssql.md) | various [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
[os](docs/collector.os.md) | [Win32_OperatingSystem](https://msdn.microsoft.com/en-us/library/aa394239) metrics (memory, processes, users) | ✓
[process](docs/collector.process.md) | [Win32_PerfRawData_PerfProc_Process](https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx) metrics (per-process stats) |
[msmq](docs/collector.msmq.md) | MSMQ queues |
[mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
[netframework_clrexceptions](docs/collector.netframework_clrexceptions.md) | .NET Framework CLR Exceptions |
[netframework_clrinterop](docs/collector.netframework_clrinterop.md) | .NET Framework Interop Metrics |
[netframework_clrjit](docs/collector.netframework_clrjit.md) | .NET Framework JIT metrics |
[netframework_clrloading](docs/collector.netframework_clrloading.md) | .NET Framework CLR Loading metrics |
[netframework_clrlocksandthreads](docs/collector.netframework_clrlocksandthreads.md) | .NET Framework locks and metrics threads |
[netframework_clrmemory](docs/collector.netframework_clrmemory.md) | .NET Framework Memory metrics |
[netframework_clrremoting](docs/collector.netframework_clrremoting.md) | .NET Framework Remoting metrics |
[netframework_clrsecurity](docs/collector.netframework_clrsecurity.md) | .NET Framework Security Check metrics |
[net](docs/collector.net.md) | Network interface I/O | ✓
[os](docs/collector.os.md) | OS metrics (memory, processes, users) | ✓
[process](docs/collector.process.md) | Per-process metrics |
[service](docs/collector.service.md) | Service state metrics | ✓
[system](docs/collector.system.md) | Win32_PerfRawData_PerfOS_System metrics (system calls) | ✓
[tcp](docs/collector.tcp.md) | [Win32_PerfRawData_Tcpip_TCPv4](https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx) metrics (tcp connections) |
[system](docs/collector.system.md) | System calls | ✓
[tcp](docs/collector.tcp.md) | TCP connections |
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | ✓
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |

@@ -44,13 +52,19 @@ Name | Description
`LISTEN_PORT` | The port to bind to. Defaults to 9182.
`METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics`
`TEXTFILE_DIR` | As the `--collector.textfile.directory` flag, provide a directory to read text files with metrics from
`EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string.

Parameters are sent to the installer via `msiexec`. Example invocation:
Parameters are sent to the installer via `msiexec`. Example invocations:

```powershell
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,iis LISTEN_PORT=5000
```

Example service collector with a custom query.
```powershell
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,service --% EXTRA_FLAGS="--collector.service.services-where ""Name LIKE 'sql%'"""
```

## Roadmap

See [open issues](https://github.com/martinlindhe/wmi_exporter/issues)
appveyor.yml, 117 lines changed:
@@ -1,51 +1,66 @@
version: "{build}"

os: Windows Server 2012 R2

environment:
GOPATH: c:\gopath
matrix:
- MSI_ARCH: amd64
GOARCH: amd64
- MSI_ARCH: 386
GOARCH: 386

clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter

install:
- go version
- set PATH=%GOPATH%\bin;c:\go\bin;%GOPATH%\bin\windows_%GOARCH%;%PATH%
- go get -u github.com/prometheus/promu
- choco install gitversion.portable -y

build_script:
- ps: gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
- go test -v ./...
- promu build -v
- ps: |
$ErrorActionPreference = "Stop"
if($env:APPVEYOR_REPO_TAG -eq "True") {
# The MSI version is not semver compliant, so just take the numerical parts
$Version = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
Write-Verbose "Setting msi version to $Version"
.\installer\build.ps1 -PathToExecutable .\wmi_exporter.exe -Version $Version -Arch "$env:MSI_ARCH"
Push-AppveyorArtifact installer\Output\wmi_exporter-$Version-$env:MSI_ARCH.msi -DeploymentName Installer
}

after_build:
- 7z a wmi_exporter-%MSI_ARCH%.zip wmi_exporter.exe

artifacts:
- name: Executable
path: 'wmi_exporter-*.zip'

deploy:
- provider: GitHub
description: WMI Exporter version $(appveyor_build_version)
artifact: Executable,Installer
auth_token:
secure: 'CrXWeTf7qONUOEki5olFfGEUPMLDeHj61koDXV3OVEaLgtACmnVHsKUub9POflda'
draft: false
prerelease: false
on:
appveyor_repo_tag: true
version: "{build}"

os: Visual Studio 2017
build: off
stack: go 1.10

environment:
GOPATH: c:\gopath

clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter

install:
- set PATH=%GOPATH%\bin;%PATH%
- set PATH=%PATH%;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin
- go get -u github.com/prometheus/promu
- go get -u github.com/alecthomas/gometalinter && gometalinter --install
- choco install gitversion.portable make -y

test_script:
- make test

after_test:
- make lint

build_script:
- ps: |
$ErrorActionPreference = "Stop"
gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
$Version = Get-Content VERSION
make crossbuild
# GH requires all files to have different names, so add version/arch to differentiate
foreach($Arch in "amd64","386") {
Rename-Item output\$Arch\wmi_exporter.exe -NewName wmi_exporter-$Version-$Arch.exe
}

after_build:
- ps: |
# Build installer packages only on tagged releases
if($env:APPVEYOR_REPO_TAG -ne "True") {
return
}
$ErrorActionPreference = "Stop"
$BuildVersion = Get-Content VERSION
# The MSI version is not semver compliant, so just take the numerical parts
$MSIVersion = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
foreach($Arch in "amd64","386") {
Write-Verbose "Building wmi_exporter $MSIVersion msi for $Arch"
.\installer\build.ps1 -PathToExecutable .\output\$Arch\wmi_exporter-$BuildVersion-$Arch.exe -Version $MSIVersion -Arch "$Arch"
Move-Item installer\Output\wmi_exporter-$MSIVersion-$Arch.msi output\$Arch\
}
- promu checksum output\

artifacts:
- name: Artifacts
path: output\**\*

deploy:
- provider: GitHub
description: WMI Exporter version $(appveyor_build_version)
artifact: Artifacts
auth_token:
secure: 'CrXWeTf7qONUOEki5olFfGEUPMLDeHj61koDXV3OVEaLgtACmnVHsKUub9POflda'
draft: false
prerelease: false
on:
appveyor_repo_tag: true
@@ -1,5 +1,5 @@
// returns data points from Win32_PerfRawData_DirectoryServices_DirectoryServices
// Partial docs: https://msdn.microsoft.com/en-us/library/ms803980.aspx
// +build windows

package collector

import (

@@ -454,7 +454,7 @@ func NewADCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *ADCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *ADCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting ad metrics:", desc, err)
return err

@@ -462,6 +462,8 @@ func (c *ADCollector) Collect(ch chan<- prometheus.Metric) error {
return nil
}

// Win32_PerfRawData_DirectoryServices_DirectoryServices docs:
// - https://msdn.microsoft.com/en-us/library/ms803980.aspx
type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
Name string

@@ -485,8 +487,8 @@ type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
DatabasemodifysPersec uint32
DatabaserecyclesPersec uint32
DigestBindsPersec uint32
DRAHighestUSNCommittedHighpart uint32
DRAHighestUSNCommittedLowpart uint32
DRAHighestUSNCommittedHighpart uint64
DRAHighestUSNCommittedLowpart uint64
DRAHighestUSNIssuedHighpart uint64
DRAHighestUSNIssuedLowpart uint64
DRAInboundBytesCompressedBetweenSitesAfterCompressionPersec uint32
collector/collector.go (new file), 39 lines:
@@ -0,0 +1,39 @@
package collector

import (
"github.com/leoluk/perflib_exporter/perflib"
"github.com/prometheus/client_golang/prometheus"
)

// ...
const (
// TODO: Make package-local
Namespace = "wmi"

// Conversion factors
ticksToSecondsScaleFactor = 1 / 1e7
windowsEpoch = 116444736000000000
)

// Factories ...
var Factories = make(map[string]func() (Collector, error))

// Collector is the interface a collector has to implement.
type Collector interface {
// Get new metrics and expose them via prometheus registry.
Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (err error)
}

type ScrapeContext struct {
perfObjects map[string]*perflib.PerfObject
}

// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape
func PrepareScrapeContext() (*ScrapeContext, error) {
objs, err := getPerflibSnapshot()
if err != nil {
return nil, err
}

return &ScrapeContext{objs}, nil
}
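The new collector/collector.go above is the heart of this comparison: every collector's Collect method now takes a *ScrapeContext, which carries one perflib snapshot shared by all collectors during a single scrape. As a rough illustration (not part of the changeset), a collector registers a constructor in Factories and reads from the context it is handed. The "example" collector name below is hypothetical, and the assumption that the exporter's main package wires PrepareScrapeContext into the scrape loop is inferred only from the signatures shown here.

```go
// Hypothetical sketch only; not part of this changeset.
package collector

import "github.com/prometheus/client_golang/prometheus"

func init() {
	// Constructors are registered by name; the exporter's main package
	// (not shown in this diff) presumably builds the enabled ones from here.
	Factories["example"] = newExampleCollector
}

type exampleCollector struct {
	up *prometheus.Desc
}

func newExampleCollector() (Collector, error) {
	return &exampleCollector{
		up: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "example", "up"),
			"Dummy gauge emitted by the illustrative example collector.",
			nil, nil,
		),
	}, nil
}

// Collect matches the new interface: it receives the shared *ScrapeContext,
// so perflib-based collectors can read ctx.perfObjects instead of issuing
// their own WMI queries; this sketch just emits a constant value.
func (c *exampleCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	ch <- prometheus.MustNewConstMetric(c.up, prometheus.GaugeValue, 1)
	return nil
}
```

Per the signatures above, a scrape would call PrepareScrapeContext() once and then pass the resulting context to each enabled collector's Collect.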
collector/container.go (new file), 282 lines:
@@ -0,0 +1,282 @@
|
||||
// +build windows,cgo
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["container"] = NewContainerMetricsCollector
|
||||
}
|
||||
|
||||
// A ContainerMetricsCollector is a Prometheus collector for containers metrics
|
||||
type ContainerMetricsCollector struct {
|
||||
// Presence
|
||||
ContainerAvailable *prometheus.Desc
|
||||
|
||||
// Number of containers
|
||||
ContainersCount *prometheus.Desc
|
||||
// memory
|
||||
UsageCommitBytes *prometheus.Desc
|
||||
UsageCommitPeakBytes *prometheus.Desc
|
||||
UsagePrivateWorkingSetBytes *prometheus.Desc
|
||||
|
||||
// CPU
|
||||
RuntimeTotal *prometheus.Desc
|
||||
RuntimeUser *prometheus.Desc
|
||||
RuntimeKernel *prometheus.Desc
|
||||
|
||||
// Network
|
||||
BytesReceived *prometheus.Desc
|
||||
BytesSent *prometheus.Desc
|
||||
PacketsReceived *prometheus.Desc
|
||||
PacketsSent *prometheus.Desc
|
||||
DroppedPacketsIncoming *prometheus.Desc
|
||||
DroppedPacketsOutgoing *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewContainerMetricsCollector constructs a new ContainerMetricsCollector
|
||||
func NewContainerMetricsCollector() (Collector, error) {
|
||||
const subsystem = "container"
|
||||
return &ContainerMetricsCollector{
|
||||
ContainerAvailable: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "available"),
|
||||
"Available",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
ContainersCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "count"),
|
||||
"Number of containers",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
UsageCommitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_bytes"),
|
||||
"Memory Usage Commit Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsageCommitPeakBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_peak_bytes"),
|
||||
"Memory Usage Commit Peak Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsagePrivateWorkingSetBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_private_working_set_bytes"),
|
||||
"Memory Usage Private Working Set Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_total"),
|
||||
"Total Run time in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeUser: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_usermode"),
|
||||
"Run Time in User mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeKernel: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_kernelmode"),
|
||||
"Run time in Kernel mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
BytesReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_bytes_total"),
|
||||
"Bytes Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
BytesSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_bytes_total"),
|
||||
"Bytes Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_total"),
|
||||
"Packets Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_total"),
|
||||
"Packets Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsIncoming: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_dropped_total"),
|
||||
"Dropped Incoming Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsOutgoing: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_dropped_total"),
|
||||
"Dropped Outgoing Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// containerClose closes the container resource
|
||||
func containerClose(c hcsshim.Container) {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
|
||||
// Types Container is passed to get the containers compute systems only
|
||||
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
|
||||
if err != nil {
|
||||
log.Error("Err in Getting containers:", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
count := len(containers)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainersCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
)
|
||||
if count == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for _, containerDetails := range containers {
|
||||
containerId := containerDetails.ID
|
||||
|
||||
container, err := hcsshim.OpenContainer(containerId)
|
||||
if container != nil {
|
||||
defer containerClose(container)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("err in opening container: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
|
||||
cstats, err := container.Statistics()
|
||||
if err != nil {
|
||||
log.Error("err in fetching container Statistics: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
// HCS V1 is for docker runtime. Add the docker:// prefix on container_id
|
||||
containerId = "docker://" + containerId
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainerAvailable,
|
||||
prometheus.CounterValue,
|
||||
1,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitPeakBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitPeakBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsagePrivateWorkingSetBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsagePrivateWorkingSetBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.TotalRuntime100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeUser,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeUser100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeKernel,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeKernel100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
|
||||
if len(cstats.Network) == 0 {
|
||||
log.Info("No Network Stats for container: ", containerId)
|
||||
continue
|
||||
}
|
||||
|
||||
networkStats := cstats.Network
|
||||
|
||||
for _, networkInterface := range networkStats {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsIncoming,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsIncoming),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsOutgoing,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsOutgoing),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
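The container collector above converts the HCS runtime counters, which Windows reports in 100-nanosecond ticks, into seconds by multiplying with ticksToSecondsScaleFactor (defined as 1 / 1e7 in collector/collector.go). A standalone sketch of that conversion, using a made-up sample reading:

```go
package main

import "fmt"

// Mirrors the constant from collector/collector.go: converts 100 ns ticks to seconds.
const ticksToSecondsScaleFactor = 1 / 1e7

func main() {
	// Hypothetical TotalRuntime100ns value: 25,000,000 ticks of 100 ns each.
	var totalRuntime100ns uint64 = 25000000
	seconds := float64(totalRuntime100ns) * ticksToSecondsScaleFactor
	fmt.Println(seconds) // 2.5
}
```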
collector/cpu.go, 410 lines changed:
@@ -1,30 +1,106 @@
|
||||
// returns data points from Win32_PerfRawData_PerfOS_Processor
|
||||
// https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx - Win32_PerfRawData_PerfOS_Processor class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"golang.org/x/sys/windows/registry"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["cpu"] = NewCPUCollector
|
||||
Factories["cpu"] = newCPUCollector
|
||||
}
|
||||
|
||||
// A CPUCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfOS_Processor metrics
|
||||
type CPUCollector struct {
|
||||
// A function to get Windows version from registry
|
||||
func getWindowsVersion() float64 {
|
||||
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
|
||||
if err != nil {
|
||||
log.Warn("Couldn't open registry", err)
|
||||
return 0
|
||||
}
|
||||
defer func() {
|
||||
err = k.Close()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to close registry key: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
currentv, _, err := k.GetStringValue("CurrentVersion")
|
||||
if err != nil {
|
||||
log.Warn("Couldn't open registry to determine current Windows version:", err)
|
||||
return 0
|
||||
}
|
||||
|
||||
currentv_flt, err := strconv.ParseFloat(currentv, 64)
|
||||
|
||||
log.Debugf("Detected Windows version %f\n", currentv_flt)
|
||||
|
||||
return currentv_flt
|
||||
}
|
||||
|
||||
type cpuCollectorBasic struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
}
|
||||
type cpuCollectorFull struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
ClockInterruptsTotal *prometheus.Desc
|
||||
IdleBreakEventsTotal *prometheus.Desc
|
||||
ParkingStatus *prometheus.Desc
|
||||
ProcessorFrequencyMHz *prometheus.Desc
|
||||
ProcessorMaxFrequencyMHz *prometheus.Desc
|
||||
ProcessorPerformance *prometheus.Desc
|
||||
}
|
||||
|
||||
func NewCPUCollector() (Collector, error) {
|
||||
// newCPUCollector constructs a new cpuCollector, appropriate for the running OS
|
||||
func newCPUCollector() (Collector, error) {
|
||||
const subsystem = "cpu"
|
||||
return &CPUCollector{
|
||||
|
||||
version := getWindowsVersion()
|
||||
// Windows version by number https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
|
||||
// For Windows 2008 or earlier Windows version is 6.0 or lower, where we only have the older "Processor" counters
|
||||
// For Windows 2008 R2 or later Windows version is 6.1 or higher, so we can use "ProcessorInformation" counters
|
||||
// Value 6.05 was selected just to split between Windows versions
|
||||
if version < 6.05 {
|
||||
return &cpuCollectorBasic{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
[]string{"core", "state"},
|
||||
nil,
|
||||
),
|
||||
TimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
|
||||
"Time that processor spent in different modes (idle, user, system, ...)",
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
DPCsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dpcs_total"),
|
||||
"Total number of received and serviced deferred procedure calls (DPCs)",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &cpuCollectorFull{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
@@ -37,7 +113,6 @@ func NewCPUCollector() (Collector, error) {
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
@@ -50,162 +125,273 @@ func NewCPUCollector() (Collector, error) {
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ClockInterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "clock_interrupts_total"),
|
||||
"Total number of received and serviced clock tick interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
IdleBreakEventsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "idle_break_events_total"),
|
||||
"Total number of time processor was woken from idle",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ParkingStatus: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "parking_status"),
|
||||
"Parking Status represents whether a processor is parked or not",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorFrequencyMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "core_frequency_mhz"),
|
||||
"Core frequency in megahertz",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorPerformance: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_performance"),
|
||||
"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CPUCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting cpu metrics:", desc, err)
|
||||
type perflibProcessor struct {
|
||||
Name string
|
||||
C1Transitions float64 `perflib:"C1 Transitions/sec"`
|
||||
C2Transitions float64 `perflib:"C2 Transitions/sec"`
|
||||
C3Transitions float64 `perflib:"C3 Transitions/sec"`
|
||||
DPCRate float64 `perflib:"DPC Rate"`
|
||||
DPCsQueued float64 `perflib:"DPCs Queued/sec"`
|
||||
Interrupts float64 `perflib:"Interrupts/sec"`
|
||||
PercentC2Time float64 `perflib:"% C1 Time"`
|
||||
PercentC3Time float64 `perflib:"% C2 Time"`
|
||||
PercentC1Time float64 `perflib:"% C3 Time"`
|
||||
PercentDPCTime float64 `perflib:"% DPC Time"`
|
||||
PercentIdleTime float64 `perflib:"% Idle Time"`
|
||||
PercentInterruptTime float64 `perflib:"% Interrupt Time"`
|
||||
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
|
||||
PercentProcessorTime float64 `perflib:"% Processor Time"`
|
||||
PercentUserTime float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorBasic) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessor, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_PerfOS_Processor struct {
|
||||
Name string
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
DPCRate uint32
|
||||
DPCsQueuedPersec uint32
|
||||
InterruptsPersec uint32
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentUserTime uint64
|
||||
}
|
||||
|
||||
/* NOTE: This is an alternative class, but it is not as widely available. Decide which to use
|
||||
type Win32_PerfRawData_Counters_ProcessorInformation struct {
|
||||
Name string
|
||||
AverageIdleTime uint64
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
ClockInterruptsPersec uint64
|
||||
DPCRate uint64
|
||||
DPCsQueuedPersec uint64
|
||||
IdleBreakEventsPersec uint64
|
||||
InterruptsPersec uint64
|
||||
ParkingStatus uint64
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentofMaximumFrequency uint64
|
||||
PercentPerformanceLimit uint64
|
||||
PercentPriorityTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentPrivilegedUtility uint64
|
||||
PercentProcessorPerformance uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentProcessorUtility uint64
|
||||
PercentUserTime uint64
|
||||
PerformanceLimitFlags uint64
|
||||
ProcessorFrequency uint64
|
||||
ProcessorStateFlags uint64
|
||||
}*/
|
||||
|
||||
func (c *CPUCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfOS_Processor
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, data := range dst {
|
||||
if strings.Contains(data.Name, "_Total") {
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
|
||||
core := data.Name
|
||||
|
||||
// These are only available from Win32_PerfRawData_Counters_ProcessorInformation, which is only available from Win2008R2+
|
||||
/*ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequency,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.ProcessorFrequency),
|
||||
socket, core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MaximumFrequency,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentofMaximumFrequency)/100*float64(data.ProcessorFrequency),
|
||||
socket, core,
|
||||
)*/
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC1Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC1Time,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC2Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC2Time,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC3Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC3Time,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentIdleTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentIdleTime,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentInterruptTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentInterruptTime,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentDPCTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentDPCTime,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentPrivilegedTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentPrivilegedTime,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentUserTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentUserTime,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.InterruptsPersec),
|
||||
cpu.Interrupts,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.DPCsQueuedPersec),
|
||||
cpu.DPCsQueued,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibProcessorInformation struct {
|
||||
Name string
|
||||
C1TimeSeconds float64 `perflib:"% C1 Time"`
|
||||
C2TimeSeconds float64 `perflib:"% C2 Time"`
|
||||
C3TimeSeconds float64 `perflib:"% C3 Time"`
|
||||
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
|
||||
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
|
||||
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
|
||||
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
|
||||
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
|
||||
DPCTimeSeconds float64 `perflib:"% DPC Time"`
|
||||
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
|
||||
IdleTimeSeconds float64 `perflib:"% Idle Time"`
|
||||
InterruptsTotal float64 `perflib:"Interrupts/sec"`
|
||||
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
|
||||
ParkingStatus float64 `perflib:"Parking Status"`
|
||||
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
|
||||
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
|
||||
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
|
||||
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
|
||||
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
|
||||
ProcessorPerformance float64 `perflib:"% Processor Performance"`
|
||||
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
|
||||
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
|
||||
UserTimeSeconds float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessorInformation, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C1TimeSeconds,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C2TimeSeconds,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C3TimeSeconds,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleTimeSeconds,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptTimeSeconds,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCTimeSeconds,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PrivilegedTimeSeconds,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.UserTimeSeconds,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCsQueuedTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ClockInterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.ClockInterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IdleBreakEventsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleBreakEventsTotal,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ParkingStatus,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ParkingStatus,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequencyMHz,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorFrequencyMHz,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorPerformance,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorPerformance,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
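Both cpu collectors above rely on unmarshalObject and `perflib:"..."` struct tags to map counter display names onto struct fields. The real unmarshalObject lives elsewhere in the repository and is not part of this diff; the sketch below only illustrates the tag-matching idea, with an invented helper and made-up counter values.

```go
// Illustration only: fillFromCounters is a hypothetical stand-in for the
// repository's unmarshalObject, showing how a `perflib` struct tag could be
// matched against counter display names.
package main

import (
	"fmt"
	"reflect"
)

type processor struct {
	Name                 string
	PercentIdleTime      float64 `perflib:"% Idle Time"`
	PercentProcessorTime float64 `perflib:"% Processor Time"`
}

// fillFromCounters copies values from a counter-name-to-value map into the
// struct fields whose `perflib` tag matches the counter name.
func fillFromCounters(dst interface{}, counters map[string]float64) {
	v := reflect.ValueOf(dst).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("perflib")
		if val, ok := counters[tag]; ok && v.Field(i).CanSet() && v.Field(i).Kind() == reflect.Float64 {
			v.Field(i).SetFloat(val)
		}
	}
}

func main() {
	p := processor{Name: "0"}
	fillFromCounters(&p, map[string]float64{
		"% Idle Time":      123.4, // sample values, not real counter data
		"% Processor Time": 56.7,
	})
	fmt.Printf("%+v\n", p)
}
```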
@@ -1,5 +1,4 @@
|
||||
// returns data points from Win32_ComputerSystem
|
||||
// https://msdn.microsoft.com/en-us/library/aa394102 - Win32_ComputerSystem class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -43,7 +42,7 @@ func NewCSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting cs metrics:", desc, err)
|
||||
return err
|
||||
@@ -51,6 +50,8 @@ func (c *CSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_ComputerSystem docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394102
|
||||
type Win32_ComputerSystem struct {
|
||||
NumberOfLogicalProcessors uint32
|
||||
TotalPhysicalMemory uint64
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_DNS_DNS
|
||||
// https://msdn.microsoft.com/en-us/library/ms803992.aspx?f=255&MSPPError=-2147217396
|
||||
// https://technet.microsoft.com/en-us/library/cc977686.aspx
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -182,7 +181,7 @@ func NewDNSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *DNSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting dns metrics:", desc, err)
|
||||
return err
|
||||
@@ -190,6 +189,9 @@ func (c *DNSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_DNS_DNS docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/ms803992.aspx?f=255&MSPPError=-2147217396
|
||||
// - https://technet.microsoft.com/en-us/library/cc977686.aspx
|
||||
type Win32_PerfRawData_DNS_DNS struct {
|
||||
AXFRRequestReceived uint32
|
||||
AXFRRequestSent uint32
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -595,7 +597,7 @@ func NewHyperVCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *HyperVCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *HyperVCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectVmHealth(ch); err != nil {
|
||||
log.Error("failed collecting hyperV health status metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,8 +1,4 @@
|
||||
// returns data points from the following classes:
|
||||
// - Win32_PerfRawData_W3SVC_WebService
|
||||
// - Win32_PerfRawData_APPPOOLCountersProvider_APPPOOLWAS
|
||||
// - Win32_PerfRawData_W3SVCW3WPCounterProvider_W3SVCW3WP
|
||||
// - Win32_PerfRawData_W3SVC_WebServiceCache
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -34,7 +30,12 @@ func getIISVersion() simple_version {
|
||||
log.Warn("Couldn't open registry to determine IIS version:", err)
|
||||
return simple_version{}
|
||||
}
|
||||
defer k.Close()
|
||||
defer func() {
|
||||
err = k.Close()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to close registry key: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
major, _, err := k.GetIntegerValue("MajorVersion")
|
||||
if err != nil {
|
||||
@@ -806,8 +807,8 @@ func NewIISCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
|
||||
appWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *siteWhitelist)),
|
||||
appBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *siteBlacklist)),
|
||||
appWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *appWhitelist)),
|
||||
appBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *appBlacklist)),
|
||||
}
|
||||
|
||||
buildIIS.iis_version = getIISVersion()
|
||||
@@ -817,7 +818,7 @@ func NewIISCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *IISCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *IISCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting iis metrics:", desc, err)
|
||||
return err
|
||||
@@ -984,7 +985,7 @@ type Win32_PerfRawData_W3SVC_WebServiceCache struct {
|
||||
URICacheMisses uint32
|
||||
}
|
||||
|
||||
var ApplicationStates = map[uint32]string{
|
||||
var applicationStates = map[uint32]string{
|
||||
1: "Uninitialized",
|
||||
2: "Initialized",
|
||||
3: "Running",
|
||||
@@ -1265,7 +1266,7 @@ func (c *IISCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, e
|
||||
}
|
||||
|
||||
// Guages
|
||||
for key, label := range ApplicationStates {
|
||||
for key, label := range applicationStates {
|
||||
isCurrentState := 0.0
|
||||
if key == app.CurrentApplicationPoolState {
|
||||
isCurrentState = 1.0
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
// returns data points from Win32_PerfRawData_PerfDisk_LogicalDisk
|
||||
// https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
|
||||
// https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -136,7 +134,7 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogicalDiskCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting logical_disk metrics:", desc, err)
|
||||
return err
|
||||
@@ -144,6 +142,9 @@ func (c *LogicalDiskCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfDisk_LogicalDisk docs:
|
||||
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
|
||||
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
|
||||
type Win32_PerfRawData_PerfDisk_LogicalDisk struct {
|
||||
Name string
|
||||
CurrentDiskQueueLength uint32
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
// returns data points from Win32_PerfRawData_PerfOS_Memory
|
||||
// <add link to documentation here> - Win32_PerfRawData_PerfOS_Memory class
|
||||
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -253,7 +256,7 @@ func NewMemoryCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MemoryCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting memory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_MSMQ_MSMQQueue
|
||||
// <add link to documentation here> - Win32_PerfRawData_MSMQ_MSMQQueue class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -33,7 +33,7 @@ type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
|
||||
func NewMSMQCollector() (Collector, error) {
|
||||
const subsystem = "msmq"
|
||||
|
||||
if *msmqWhereClause != "" {
|
||||
if *msmqWhereClause == "" {
|
||||
log.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func NewMSMQCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting msmq metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,22 +1,4 @@
|
||||
// returns data points from the following classes:
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerLocks
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -62,7 +44,12 @@ func getMSSQLInstances() mssqlInstancesType {
|
||||
log.Warn("Couldn't open registry to determine SQL instances:", err)
|
||||
return sqlDefaultInstance
|
||||
}
|
||||
defer k.Close()
|
||||
defer func() {
|
||||
err = k.Close()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to close registry key: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
instanceNames, err := k.ReadValueNames(0)
|
||||
if err != nil {
|
||||
@@ -381,7 +368,7 @@ func NewMSSQLCollector() (Collector, error) {
|
||||
|
||||
const subsystem = "mssql"
|
||||
|
||||
MSSQLCollector := MSSQLCollector{
|
||||
mssqlCollector := MSSQLCollector{
|
||||
// meta
|
||||
mssqlScrapeDurationDesc: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "collector_duration_seconds"),
|
||||
@@ -1653,17 +1640,17 @@ func NewMSSQLCollector() (Collector, error) {
|
||||
mssqlInstances: getMSSQLInstances(),
|
||||
}
|
||||
|
||||
MSSQLCollector.mssqlCollectors = MSSQLCollector.getMSSQLCollectors()
|
||||
mssqlCollector.mssqlCollectors = mssqlCollector.getMSSQLCollectors()
|
||||
|
||||
if *mssqlPrintCollectors {
|
||||
fmt.Printf("Available SQLServer Classes:\n")
|
||||
for name := range MSSQLCollector.mssqlCollectors {
|
||||
for name := range mssqlCollector.mssqlCollectors {
|
||||
fmt.Printf(" - %s\n", name)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
return &MSSQLCollector, nil
|
||||
return &mssqlCollector, nil
|
||||
}
|
||||
|
||||
type mssqlCollectorFunc func(ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error)
|
||||
@@ -1700,7 +1687,7 @@ func (c *MSSQLCollector) execute(name string, fn mssqlCollectorFunc, ch chan<- p
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MSSQLCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *MSSQLCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
enabled := mssqlExpandEnabledCollectors(*mssqlEnabledCollectors)
|
||||
@@ -1721,6 +1708,9 @@ func (c *MSSQLCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// win32PerfRawDataSQLServerAccessMethods docs:
|
||||
// - Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object
|
||||
type win32PerfRawDataSQLServerAccessMethods struct {
|
||||
AUcleanupbatchesPersec uint64
|
||||
AUcleanupsPersec uint64
|
||||
@@ -2085,6 +2075,8 @@ func (c *MSSQLCollector) collectAccessMethods(ch chan<- prometheus.Metric, sqlIn
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica docs:
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica
|
||||
type win32PerfRawDataSQLServerAvailabilityReplica struct {
|
||||
Name string
|
||||
BytesReceivedfromReplicaPersec uint64
|
||||
@@ -2177,6 +2169,8 @@ func (c *MSSQLCollector) collectAvailabilityReplica(ch chan<- prometheus.Metric,
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager docs:
|
||||
// https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object
|
||||
type win32PerfRawDataSQLServerBufferManager struct {
|
||||
BackgroundwriterpagesPersec uint64
|
||||
Buffercachehitratio uint64
|
||||
@@ -2374,6 +2368,8 @@ func (c *MSSQLCollector) collectBufferManager(ch chan<- prometheus.Metric, sqlIn
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica docs:
|
||||
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica
|
||||
type win32PerfRawDataSQLServerDatabaseReplica struct {
|
||||
Name string
|
||||
DatabaseFlowControlDelay uint64
|
||||
@@ -2586,6 +2582,8 @@ func (c *MSSQLCollector) collectDatabaseReplica(ch chan<- prometheus.Metric, sql
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases docs:
|
||||
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017
|
||||
type win32PerfRawDataSQLServerDatabases struct {
|
||||
Name string
|
||||
ActiveTransactions uint64
|
||||
@@ -2974,6 +2972,8 @@ func (c *MSSQLCollector) collectDatabases(ch chan<- prometheus.Metric, sqlInstan
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics docs:
|
||||
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object
|
||||
type win32PerfRawDataSQLServerGeneralStatistics struct {
|
||||
ActiveTempTables uint64
|
||||
ConnectionResetPersec uint64
|
||||
@@ -3186,6 +3186,8 @@ func (c *MSSQLCollector) collectGeneralStatistics(ch chan<- prometheus.Metric, s
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerLocks docs:
|
||||
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object
|
||||
type win32PerfRawDataSQLServerLocks struct {
|
||||
Name string
|
||||
AverageWaitTimems uint64
|
||||
@@ -3262,6 +3264,8 @@ func (c *MSSQLCollector) collectLocks(ch chan<- prometheus.Metric, sqlInstance s
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager docs:
|
||||
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object
|
||||
type win32PerfRawDataSQLServerMemoryManager struct {
|
||||
ConnectionMemoryKB uint64
|
||||
DatabaseCacheMemoryKB uint64
|
||||
@@ -3443,6 +3447,8 @@ func (c *MSSQLCollector) collectMemoryManager(ch chan<- prometheus.Metric, sqlIn
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics docs:
|
||||
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object
|
||||
type win32PerfRawDataSQLServerSQLStatistics struct {
|
||||
AutoParamAttemptsPersec uint64
|
||||
BatchRequestsPersec uint64
|
||||
|
||||
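The mssql hunks end here. Before moving on to the network collector, here is a hedged sketch of how the `mssqlCollectorFunc` values declared above are meant to be driven: one call per enabled class per discovered SQL Server instance. The helper name and the error handling are illustrative assumptions, and the snippet assumes it sits in the same `collector` package as the definitions above (where `prometheus` and `log` are already imported):

```go
// Hedged sketch (not the committed code): drive each registered
// mssqlCollectorFunc once per discovered SQL Server instance.
func collectPerInstance(ch chan<- prometheus.Metric, instances []string,
	fns map[string]mssqlCollectorFunc) {
	for _, sqlInstance := range instances {
		for name, fn := range fns {
			// Each class collector emits its metrics and reports any failure.
			if _, err := fn(ch, sqlInstance); err != nil {
				log.Errorf("mssql class %s failed for instance %s: %v", name, sqlInstance, err)
			}
		}
	}
}
```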
@@ -1,8 +1,4 @@
|
||||
// returns data points from Win32_PerfRawData_Tcpip_NetworkInterface
|
||||
|
||||
// https://technet.microsoft.com/en-us/security/aa394340(v=vs.80) (Win32_PerfRawData_Tcpip_NetworkInterface class)
|
||||
// https://msdn.microsoft.com/en-us/library/aa394216 (Win32_NetworkAdapter class)
|
||||
// https://msdn.microsoft.com/en-us/library/aa394353 (Win32_PnPEntity class)
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -136,7 +132,7 @@ func NewNetworkCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NetworkCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting net metrics:", desc, err)
|
||||
return err
|
||||
@@ -150,6 +146,8 @@ func mangleNetworkName(name string) string {
|
||||
return nicNameToUnderscore.ReplaceAllString(name, "_")
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_Tcpip_NetworkInterface docs:
|
||||
// - https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)
|
||||
type Win32_PerfRawData_Tcpip_NetworkInterface struct {
|
||||
BytesReceivedPerSec uint64
|
||||
BytesSentPerSec uint64
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import "testing"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRExceptions
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRExceptions class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRExceptionsCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRInterop
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRInterop class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -46,7 +46,7 @@ func NewNETFramework_NETCLRInteropCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrinterop metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRJit
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRJit class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRJitCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrjit metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRLoading
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRLoading class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -88,7 +88,7 @@ func NewNETFramework_NETCLRLoadingCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrloading metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -74,7 +74,7 @@ func NewNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRMemory
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRMemory class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -112,7 +112,7 @@ func NewNETFramework_NETCLRMemoryCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrmemory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRRemoting
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRRemoting class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -67,7 +67,7 @@ func NewNETFramework_NETCLRRemotingCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrremoting metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRSecurity
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRSecurity class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRSecurityCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
// returns data points from Win32_OperatingSystem
|
||||
// https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -114,7 +113,7 @@ func NewOSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting os metrics:", desc, err)
|
||||
return err
|
||||
@@ -122,6 +121,8 @@ func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_OperatingSystem docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
|
||||
type Win32_OperatingSystem struct {
|
||||
FreePhysicalMemory uint64
|
||||
FreeSpaceInPagingFiles uint64
|
||||
|
||||
collector/perflib.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package collector

import (
	"fmt"
	"reflect"

	perflibCollector "github.com/leoluk/perflib_exporter/collector"
	"github.com/leoluk/perflib_exporter/perflib"
	"github.com/prometheus/common/log"
)

func getPerflibSnapshot() (map[string]*perflib.PerfObject, error) {
	objects, err := perflib.QueryPerformanceData("Global")
	if err != nil {
		return nil, err
	}

	indexed := make(map[string]*perflib.PerfObject)
	for _, obj := range objects {
		indexed[obj.Name] = obj
	}
	return indexed, nil
}

func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
	if obj == nil {
		return fmt.Errorf("counter not found")
	}
	rv := reflect.ValueOf(vs)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return fmt.Errorf("%v is nil or not a pointer to slice", reflect.TypeOf(vs))
	}
	ev := rv.Elem()
	if ev.Kind() != reflect.Slice {
		return fmt.Errorf("%v is not slice", reflect.TypeOf(vs))
	}

	// Ensure sufficient length
	if ev.Cap() < len(obj.Instances) {
		nvs := reflect.MakeSlice(ev.Type(), len(obj.Instances), len(obj.Instances))
		ev.Set(nvs)
	}

	for idx, instance := range obj.Instances {
		target := ev.Index(idx)
		rt := target.Type()

		counters := make(map[string]*perflib.PerfCounter, len(instance.Counters))
		for _, ctr := range instance.Counters {
			if ctr.Def.IsBaseValue && !ctr.Def.IsNanosecondCounter {
				counters[ctr.Def.Name+"_Base"] = ctr
			} else {
				counters[ctr.Def.Name] = ctr
			}
		}

		for i := 0; i < target.NumField(); i++ {
			f := rt.Field(i)
			tag := f.Tag.Get("perflib")
			if tag == "" {
				continue
			}

			ctr, found := counters[tag]
			if !found {
				log.Debugf("missing counter %q, has %v", tag, counters)
				return fmt.Errorf("could not find counter %q on instance", tag)
			}
			if !target.Field(i).CanSet() {
				return fmt.Errorf("tagged field %v cannot be written to", f)
			}

			switch ctr.Def.CounterType {
			case perflibCollector.PERF_ELAPSED_TIME:
				target.Field(i).SetFloat(float64(ctr.Value-windowsEpoch) / float64(obj.Frequency))
			case perflibCollector.PERF_100NSEC_TIMER, perflibCollector.PERF_PRECISION_100NS_TIMER:
				target.Field(i).SetFloat(float64(ctr.Value) * ticksToSecondsScaleFactor)
			default:
				target.Field(i).SetFloat(float64(ctr.Value))
			}
		}

		if instance.Name != "" && target.FieldByName("Name").CanSet() {
			target.FieldByName("Name").SetString(instance.Name)
		}
	}

	return nil
}

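To make the reflection-based mapping above concrete, here is a hedged usage sketch: a hypothetical struct whose float64 fields carry `perflib` tags, filled from one snapshot. It assumes it lives in the same `collector` package as the file above; the struct name, the "Processor Information" object name, and the counter names in the tags are illustrative assumptions, not taken from the exporter:

```go
// Hypothetical example of consuming getPerflibSnapshot/unmarshalObject.
// Object and counter names are assumptions for illustration only.
type processorPerf struct {
	Name             string  // filled from the perflib instance name
	PercentUserTime  float64 `perflib:"% User Time"`
	InterruptsPerSec float64 `perflib:"Interrupts/sec"`
}

func collectProcessorSketch() ([]processorPerf, error) {
	snapshot, err := getPerflibSnapshot()
	if err != nil {
		return nil, err
	}

	var rows []processorPerf
	// unmarshalObject expects a pointer to a slice; one element per instance.
	if err := unmarshalObject(snapshot["Processor Information"], &rows); err != nil {
		return nil, err
	}
	return rows, nil
}
```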
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_PerfProc_Process
|
||||
// https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx - Win32_PerfRawData_PerfProc_Process class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -135,7 +135,7 @@ func NewProcessCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ProcessCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *ProcessCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting process metrics:", desc, err)
|
||||
return err
|
||||
@@ -143,6 +143,8 @@ func (c *ProcessCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfProc_Process docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx
|
||||
type Win32_PerfRawData_PerfProc_Process struct {
|
||||
Name string
|
||||
CreatingProcessID uint32
|
||||
@@ -191,7 +193,9 @@ func (c *ProcessCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Des
|
||||
|
||||
var dst_wp []WorkerProcess
|
||||
q_wp := queryAll(&dst_wp)
|
||||
wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration")
|
||||
if err := wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration"); err != nil {
|
||||
log.Debugf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err)
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_Service
|
||||
// https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx - Win32_Service class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -64,7 +64,7 @@ func NewserviceCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *serviceCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting service metrics:", desc, err)
|
||||
return err
|
||||
@@ -72,6 +72,8 @@ func (c *serviceCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_Service docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx
|
||||
type Win32_Service struct {
|
||||
Name string
|
||||
State string
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
// returns data points from Win32_PerfRawData_PerfOS_System class
|
||||
// https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -70,7 +69,7 @@ func NewSystemCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *SystemCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting system metrics:", desc, err)
|
||||
return err
|
||||
@@ -78,6 +77,8 @@ func (c *SystemCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfOS_System docs:
|
||||
// - https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp
|
||||
type Win32_PerfRawData_PerfOS_System struct {
|
||||
ContextSwitchesPersec uint32
|
||||
ExceptionDispatchesPersec uint32
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
// returns data points from Win32_PerfRawData_Tcpip_TCPv4
|
||||
|
||||
// https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx (Win32_PerfRawData_Tcpip_TCPv4 class)
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -92,7 +90,7 @@ func NewTCPCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *TCPCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *TCPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting tcp metrics:", desc, err)
|
||||
return err
|
||||
@@ -100,6 +98,8 @@ func (c *TCPCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_Tcpip_TCPv4 docs
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx
|
||||
type Win32_PerfRawData_Tcpip_TCPv4 struct {
|
||||
ConnectionFailures uint64
|
||||
ConnectionsActive uint64
|
||||
|
||||
@@ -212,7 +212,7 @@ func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
 }

 // Update implements the Collector interface.
-func (c *textFileCollector) Collect(ch chan<- prometheus.Metric) error {
+func (c *textFileCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
 	error := 0.0
 	mtimes := map[string]time.Time{}

@@ -237,9 +237,17 @@ fileLoop:
 			continue
 		}
 		var parser expfmt.TextParser
-		r := utfbom.SkipOnly(carriageReturnFilteringReader{r: file})
+		r, encoding := utfbom.Skip(carriageReturnFilteringReader{r: file})
+		if err = checkBOM(encoding); err != nil {
+			log.Errorf("Invalid file encoding detected in %s: %s - file must be UTF8", path, err.Error())
+			error = 1.0
+			continue
+		}
 		parsedFamilies, err := parser.TextToMetricFamilies(r)
-		file.Close()
+		closeErr := file.Close()
+		if closeErr != nil {
+			log.Warnf("Error closing file: %v", err)
+		}
 		if err != nil {
 			log.Errorf("Error parsing %q: %v", path, err)
 			error = 1.0
@@ -281,3 +289,11 @@ fileLoop:
 	)
 	return nil
 }
+
+func checkBOM(encoding utfbom.Encoding) error {
+	if encoding == utfbom.Unknown || encoding == utfbom.UTF8 {
+		return nil
+	}
+
+	return fmt.Errorf(encoding.String())
+}

@@ -1,14 +1,15 @@
 package collector

 import (
-	"testing"
-	"strings"
+	"github.com/dimchansky/utfbom"
 	"io/ioutil"
+	"strings"
+	"testing"
 )

 func TestCRFilter(t *testing.T) {
 	sr := strings.NewReader("line 1\r\nline 2")
-	cr := carriageReturnFilteringReader{ r: sr }
+	cr := carriageReturnFilteringReader{r: sr}
 	b, err := ioutil.ReadAll(cr)
 	if err != nil {
 		t.Error(err)
@@ -17,4 +18,30 @@ func TestCRFilter(t *testing.T) {
 	if string(b) != "line 1\nline 2" {
 		t.Errorf("Unexpected output %q", b)
 	}
 }
+
+func TestCheckBOM(t *testing.T) {
+	testdata := []struct {
+		encoding utfbom.Encoding
+		err string
+	}{
+		{utfbom.Unknown, ""},
+		{utfbom.UTF8, ""},
+		{utfbom.UTF16BigEndian, "UTF16BigEndian"},
+		{utfbom.UTF16LittleEndian, "UTF16LittleEndian"},
+		{utfbom.UTF32BigEndian, "UTF32BigEndian"},
+		{utfbom.UTF32LittleEndian, "UTF32LittleEndian"},
+	}
+	for _, d := range testdata {
+		err := checkBOM(d.encoding)
+		if d.err == "" && err != nil {
+			t.Error(err)
+		}
+		if d.err != "" && err == nil {
+			t.Errorf("Missing expected error %s", d.err)
+		}
+		if err != nil && !strings.Contains(err.Error(), d.err) {
+			t.Error(err)
+		}
+	}
+}

@@ -1,4 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_vmGuestLib_VMem and Win32_PerfRawData_vmGuestLib_VCPU
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -37,6 +38,7 @@ type VmwareCollector struct {
|
||||
HostProcessorSpeedMHz *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewVmwareCollector constructs a new VmwareCollector
|
||||
func NewVmwareCollector() (Collector, error) {
|
||||
const subsystem = "vmware"
|
||||
return &VmwareCollector{
|
||||
@@ -160,7 +162,7 @@ func NewVmwareCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *VmwareCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *VmwareCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectMem(ch); err != nil {
|
||||
log.Error("failed collecting vmware memory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -4,27 +4,9 @@ import (
"bytes"
"reflect"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)

// ...
const (
Namespace = "wmi"

// Conversion factors
ticksToSecondsScaleFactor = 1 / 1e7
)

// Factories ...
var Factories = make(map[string]func() (Collector, error))

// Collector is the interface a collector has to implement.
type Collector interface {
// Get new metrics and expose them via prometheus registry.
Collect(ch chan<- prometheus.Metric) (err error)
}

func className(src interface{}) string {
s := reflect.Indirect(reflect.ValueOf(src))
t := s.Type()

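Across the diff, every collector's `Collect` gains a `*ScrapeContext` parameter, and `exporter.go` calls `collector.PrepareScrapeContext()` once per scrape, but the compare view never shows the new definitions themselves. The following is therefore only an inferred sketch of what the reworked `wmi.go` plumbing could look like; the `ScrapeContext` field is an assumption based on the perflib snapshot helper shown earlier, and the snippet assumes the `collector` package with its existing imports:

```go
// Inferred sketch, not the committed code: the reworked Collector interface
// and a scrape context carrying one perflib snapshot per scrape.
type ScrapeContext struct {
	perfObjects map[string]*perflib.PerfObject // assumed field: shared perflib snapshot
}

// PrepareScrapeContext builds the context handed to every collector in a scrape.
func PrepareScrapeContext() (*ScrapeContext, error) {
	objs, err := getPerflibSnapshot()
	if err != nil {
		return nil, err
	}
	return &ScrapeContext{perfObjects: objs}, nil
}

// Collector is the interface a collector has to implement.
type Collector interface {
	// Get new metrics and expose them via prometheus registry.
	Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (err error)
}
```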
140
contrib/console_templates/wmi-overview.html
Normal file
140
contrib/console_templates/wmi-overview.html
Normal file
@@ -0,0 +1,140 @@
|
||||
{{ template "head" . }}
|
||||
|
||||
{{ template "prom_content_head" . }}
|
||||
<h1>Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
|
||||
|
||||
<h3>CPU Usage</h3>
|
||||
<div id="cpuGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#cpuGraph"),
|
||||
expr: "sum by (mode)(irate(wmi_cpu_time_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
|
||||
renderer: 'area',
|
||||
max: {{ with printf "count(count by (cpu)(wmi_cpu_time_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yTitle: 'Cores'
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Network Utilization</h3>
|
||||
<div id="networkioGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#networkioGraph"),
|
||||
expr: [
|
||||
"irate(wmi_net_bytes_sent_total{job='node',instance='{{ .Params.instance }}',nic!~'^isatap_ec2_internal'}[5m])",
|
||||
"irate(wmi_net_bytes_received_total{job='node',instance='{{ .Params.instance }}',nic!~'^isatap_ec2_internal'}[5m])",
|
||||
],
|
||||
min: 0,
|
||||
name: [ 'sent', 'received' ],
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "B",
|
||||
yTitle: 'Network IO'
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Disk I/O Utilization</h3>
|
||||
<div id="diskioGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#diskioGraph"),
|
||||
expr: [
|
||||
"100 - irate(wmi_logical_disk_idle_seconds_total{job='node',instance='{{ .Params.instance }}',volume!~'^HarddiskVolume.*$'}[5m]) * 100",
|
||||
],
|
||||
min: 0,
|
||||
name: '[[ volume ]]',
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "%",
|
||||
yTitle: 'Disk I/O Utilization'
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Memory</h3>
|
||||
<div id="memoryGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#memoryGraph"),
|
||||
renderer: 'area',
|
||||
expr: [
|
||||
"wmi_cs_physical_memory_bytes{job='node',instance='{{ .Params.instance }}'}",
|
||||
"wmi_os_physical_memory_free_bytes{job='node',instance='{{ .Params.instance }}'}",
|
||||
"wmi_cs_physical_memory__bytes{job='node',instance='{{ .Params.instance }}'} - wmi_os_physical_memory_free_bytes{job='node',instance='{{.Params.instance}}'}",
|
||||
"wmi_os_virtual_memory_bytes{job='node',instance='{{ .Params.instance }}'}",
|
||||
],
|
||||
name: ["Physical", "Free", "Used", "Virtual"],
|
||||
min: 0,
|
||||
yUnits: "B",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize1024,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize1024,
|
||||
yTitle: 'Memory'
|
||||
})
|
||||
</script>
|
||||
|
||||
{{ template "prom_right_table_head" }}
|
||||
<tr><th colspan="2">Overview</th></tr>
|
||||
<tr>
|
||||
<td>User CPU</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(wmi_cpu_time_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(wmi_cpu_time_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Privileged CPU</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(wmi_cpu_time_total{job='node',instance='%s',mode='privileged'}[5m])) * 100 / count(count by (cpu)(wmi_cpu_time_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Memory Total</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "wmi_cs_physical_memory_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Memory Free</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "wmi_os_physical_memory_free_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th colspan="2">Network</th>
|
||||
</tr>
|
||||
{{ range printf "wmi_net_bytes_received_total{job='node',instance='%s',nic!='isatap_ec2_internal'}" .Params.instance | query | sortByLabel "nic" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.nic }} Received</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(wmi_net_bytes_received_total{job='node',instance='%s',nic='%s'}[5m])" .Labels.instance .Labels.nic) "B/s" "humanize") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>{{ .Labels.nic }} Transmitted</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(wmi_net_bytes_sent_total{job='node',instance='%s',nic='%s'}[5m])" .Labels.instance .Labels.nic) "B/s" "humanize") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tr>
|
||||
<tr>
|
||||
<th colspan="2">Disks</th>
|
||||
</tr>
|
||||
{{ range printf "wmi_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.volume }} Utilization</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "100 - irate(wmi_logical_disk_idle_seconds_total{job='node',instance='%s',volume='%s'}[5m]) * 100" .Labels.instance .Labels.volume) "%" "printf.1f") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ range printf "wmi_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.volume }} Throughput</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(wmi_logical_disk_read_bytes_total{job='node',instance='%s',volume='%s'}[5m]) + irate(wmi_logical_disk_write_bytes_total{job='node',instance='%s',volume='%s'}[5m])" .Labels.instance .Labels.volume .Labels.instance .Labels.volume) "B/s" "humanize") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
<tr>
|
||||
<th colspan="2">Filesystem Fullness</th>
|
||||
</tr>
|
||||
{{ define "roughlyNearZero" }}
|
||||
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
|
||||
{{ end }}
|
||||
{{ range printf "wmi_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.volume }}</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "100 - wmi_logical_disk_free_bytes{job='node',instance='%s',volume='%s'} / wmi_logical_disk_size_bytes{job='node'} * 100" .Labels.instance .Labels.volume) "%" "roughlyNearZero") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tr>
|
||||
{{ template "prom_right_table_tail" }}
|
||||
|
||||
{{ template "prom_content_tail" . }}
|
||||
|
||||
{{ template "tail" }}
|
||||
@@ -9,6 +9,7 @@ This directory contains documentation of the collectors in the WMI exporter, wit
|
||||
- [`hyperv`](collector.hyperv.md)
|
||||
- [`iis`](collector.iis.md)
|
||||
- [`logical_disk`](collector.logical_disk.md)
|
||||
- [`memory`](collector.memory.md)
|
||||
- [`msmq`](collector.msmq.md)
|
||||
- [`mssql`](collector.mssql.md)
|
||||
- [`netframework_clrexceptions`](collector.netframework_clrexceptions.md)
|
||||
@@ -26,4 +27,4 @@ This directory contains documentation of the collectors in the WMI exporter, wit
|
||||
- [`system`](collector.system.md)
|
||||
- [`tcp`](collector.tcp.md)
|
||||
- [`textfile`](collector.textfile.md)
|
||||
- [`vmware`](collector.vmware.md)
|
||||
- [`vmware`](collector.vmware.md)
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The %name% collector exposes metrics about ...
|
||||
|
||||
Metric name prefix | `%name%`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `%name%`
|
||||
Classes | [`...`](https://msdn.microsoft.com/en-us/library/...)
|
||||
Enabled by default? | Yes/No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The ad collector exposes metrics about a Active Directory Domain Services domain controller
|
||||
|
||||
Metric name prefix | `ad`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `ad`
|
||||
Classes | [`Win32_PerfRawData_DirectoryServices_DirectoryServices`](https://msdn.microsoft.com/en-us/library/ms803980.aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
docs/collector.container.md (new file, 42 lines)
@@ -0,0 +1,42 @@
# container collector

The container collector exposes metrics about containers running on the system

|||
-|-
Metric name prefix | `container`
Enabled by default? | No

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_container_available` | Available | counter | `container_id`
`wmi_container_count` | Number of containers | gauge | `container_id`
`wmi_container_cpu_usage_seconds_kernelmode` | Run time in Kernel mode in Seconds | counter | `container_id`
`wmi_container_cpu_usage_seconds_usermode` | Run Time in User mode in Seconds | counter | `container_id`
`wmi_container_cpu_usage_seconds_total` | Total Run time in Seconds | counter | `container_id`
`wmi_container_memory_usage_commit_bytes` | Memory Usage Commit Bytes | gauge | `container_id`
`wmi_container_memory_usage_commit_peak_bytes` | Memory Usage Commit Peak Bytes | gauge | `container_id`
`wmi_container_memory_usage_private_working_set_bytes` | Memory Usage Private Working Set Bytes | gauge | `container_id`
`wmi_container_network_receive_bytes_total` | Bytes Received on Interface | counter | `container_id`, `interface`
`wmi_container_network_receive_packets_total` | Packets Received on Interface | counter | `container_id`, `interface`
`wmi_container_network_receive_packets_dropped_total` | Dropped Incoming Packets on Interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_bytes_total` | Bytes Sent on Interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_packets_total` | Packets Sent on Interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_packets_dropped_total` | Dropped Outgoing Packets on Interface | counter | `container_id`, `interface`

### Example metric
_wmi_container_network_receive_bytes_total{container_id="docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e",interface="822179E7-002C-4280-ABBA-28BCFE401826"} 9.3305343e+07_

This metric means that a total of _9.3305343e+07_ bytes were received on interface _822179E7-002C-4280-ABBA-28BCFE401826_ for container _docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

@@ -2,8 +2,11 @@
|
||||
|
||||
The cpu collector exposes metrics about CPU usage
|
||||
|
||||
Metric name prefix | `cpu`
|
||||
Classes | [`Win32_PerfRawData_PerfOS_Processor`](https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx)
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `cpu`
|
||||
Data source | Perflib
|
||||
Counters | `ProcessorInformation` (Windows Server 2008R2 and later) `Processor` (older versions)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
@@ -11,6 +14,7 @@ Enabled by default? | Yes
|
||||
None
|
||||
|
||||
## Metrics
|
||||
These metrics are available on all versions of Windows:
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
@@ -19,6 +23,16 @@ Name | Description | Type | Labels
|
||||
`wmi_cpu_interrupts_total` | Total number of received and serviced hardware interrupts | counter | `core`
|
||||
`wmi_cpu_dpcs_total` | Total number of received and serviced deferred procedure calls (DPCs) | counter | `core`
|
||||
|
||||
These metrics are only exposed on Windows Server 2008R2 and later:
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_cpu_clock_interrupts_total` | Total number of received and serviced clock tick interrupts | `core`
|
||||
`wmi_cpu_idle_break_events_total` | Total number of time processor was woken from idle | `core`
|
||||
`wmi_cpu_parking_status` | Parking Status represents whether a processor is parked or not | `gauge`
|
||||
`wmi_cpu_core_frequency_mhz` | Core frequency in megahertz | `gauge`
|
||||
`wmi_cpu_processor_performance` | Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100% | `gauge`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The cs collector exposes metrics detailing the hardware of the computer system
|
||||
|
||||
Metric name prefix | `cs`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `cs`
|
||||
Classes | [`Win32_ComputerSystem`](https://msdn.microsoft.com/en-us/library/aa394102)
|
||||
Enabled by default? | Yes
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The dns collector exposes metrics about the DNS server
|
||||
|
||||
Metric name prefix | `dns`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `dns`
|
||||
Classes | [`Win32_PerfRawData_DNS_DNS`](https://technet.microsoft.com/en-us/library/cc977686.aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,8 +2,10 @@
|
||||
|
||||
The hyperv collector exposes metrics about the Hyper-V hypervisor
|
||||
|
||||
Metric name prefix | `hyperv`
|
||||
Classes | `Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary`, `Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition`, `Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition`, `Win32_PerfRawData_HvStats_HyperVHypervisor`, `Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor`, `Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor`, `Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch`, `Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter`, `Win32_PerfRawData_Counters_HyperVVirtualStorageDevice`, `Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `hyperv`
|
||||
Classes | `Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary`<br/>`Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisor`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor`<br/>`Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch`<br/>`Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter`<br/>`Win32_PerfRawData_Counters_HyperVVirtualStorageDevice`<br/>`Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
@@ -2,8 +2,10 @@
|
||||
|
||||
The iis collector exposes metrics about the IIS server
|
||||
|
||||
Metric name prefix | `iis`
|
||||
Classes | `Win32_PerfRawData_W3SVC_WebService`, `Win32_PerfRawData_APPPOOLCountersProvider_APPPOOLWAS`, `Win32_PerfRawData_W3SVCW3WPCounterProvider_W3SVCW3WP`, `Win32_PerfRawData_W3SVC_WebServiceCache`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `iis`
|
||||
Classes | `Win32_PerfRawData_W3SVC_WebService`<br/>`Win32_PerfRawData_APPPOOLCountersProvider_APPPOOLWAS`<br/>`Win32_PerfRawData_W3SVCW3WPCounterProvider_W3SVCW3WP`<br/>`Win32_PerfRawData_W3SVC_WebServiceCache`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The logical_disk collector exposes metrics about logical disks (in contrast to physical disks)
|
||||
|
||||
Metric name prefix | `logical_disk`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `logical_disk`
|
||||
Classes | [`Win32_PerfRawData_PerfDisk_LogicalDisk`](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71))
|
||||
Enabled by default? | Yes
|
||||
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
The memory collector exposes metrics about system memory usage
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `memory`
|
||||
Classes | `Win32_PerfRawData_PerfOS_Memory`
|
||||
Enabled by default? | Yes
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The msmq collector exposes metrics about the queues on a MSMQ server
|
||||
|
||||
Metric name prefix | `msmq`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `msmq`
|
||||
Classes | `Win32_PerfRawData_MSMQ_MSMQQueue`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,8 +2,10 @@
|
||||
|
||||
The mssql collector exposes metrics about the MSSQL server
|
||||
|
||||
Metric name prefix | `mssql`
|
||||
Classes | [`Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object), [`Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica), [`Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object), [`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica), [`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017), [`Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object), [`Win32_PerfRawData_MSSQLSERVER_SQLServerLocks`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object), [`Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object), [`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object)
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `mssql`
|
||||
Classes | [`Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerLocks`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The net collector exposes metrics about network interfaces
|
||||
|
||||
Metric name prefix | `net`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `net`
|
||||
Classes | [`Win32_PerfRawData_Tcpip_NetworkInterface`](https://technet.microsoft.com/en-us/security/aa394340(v=vs.80))
|
||||
Enabled by default? | Yes
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The netframework_clrexceptions collector exposes metrics about CLR exceptions in the dotnet framework.
|
||||
|
||||
Metric name prefix | `netframework_clrexceptions`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrexceptions`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRExceptions`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The netframework_clrinterop collector exposes metrics about interop between the dotnet framework and outside components.
|
||||
|
||||
Metric name prefix | `netframework_clrinterop`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrinterop`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRInterop`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The netframework_clrjit collector exposes metrics about the dotnet Just-in-Time compiler.
|
||||
|
||||
Metric name prefix | `netframework_clrjit`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrjit`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRJit`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The netframework_clrloading collector exposes metrics about the dotnet loader.
|
||||
|
||||
Metric name prefix | `netframework_clrloading`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrloading`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRLoading`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The netframework_clrlocksandthreads collector exposes metrics about locks and threads in dotnet applications.
|
||||
|
||||
Metric name prefix | `netframework_clrlocksandthreads`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrlocksandthreads`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The netframework_clrmemory collector exposes metrics about memory in dotnet applications.
|
||||
|
||||
Metric name prefix | `netframework_clrmemory`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrmemory`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRMemory`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The netframework_clrremoting collector exposes metrics about dotnet remoting.
|
||||
|
||||
Metric name prefix | `netframework_clrremoting`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrremoting`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRRemoting`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The netframework_clrsecurity collector exposes metrics about security checks in dotnet applications
|
||||
|
||||
Metric name prefix | `netframework_clrsecurity`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrsecurity`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRSecurity`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The os collector exposes metrics about the operating system
|
||||
|
||||
Metric name prefix | `os`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `os`
|
||||
Classes | [`Win32_OperatingSystem`](https://msdn.microsoft.com/en-us/library/aa394239)
|
||||
Enabled by default? | Yes
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The process collector exposes metrics about processes
|
||||
|
||||
Metric name prefix | `process`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `process`
|
||||
Classes | [`Win32_PerfRawData_PerfProc_Process`](https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The service collector exposes metrics about Windows Services
|
||||
|
||||
Metric name prefix | `service`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `service`
|
||||
Classes | [`Win32_Service`](https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx)
|
||||
Enabled by default? | Yes
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The system collector exposes metrics about ...
|
||||
|
||||
Metric name prefix | `system`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `system`
|
||||
Classes | [`Win32_PerfRawData_PerfOS_System`](https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp)
|
||||
Enabled by default? | Yes
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The tcp collector exposes metrics about the TCP/IPv4 network stack.
|
||||
|
||||
Metric name prefix | `tcp`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `tcp`
|
||||
Classes | [`Win32_PerfRawData_Tcpip_TCPv4`](https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The textfile collector exposes metrics from files written by other processes.
|
||||
|
||||
Metric name prefix | `textfile`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `textfile`
|
||||
Classes | None
|
||||
Enabled by default? | Yes
|
||||
|
||||
@@ -10,7 +12,7 @@ Enabled by default? | Yes
|
||||
|
||||
### `--collector.textfile.directory`
|
||||
|
||||
The directory containing the files to be ingested.
|
||||
The directory containing the files to be ingested. Only files with the extension `.prom` are read.
|
||||
|
||||
Default value: `C:\Program Files\wmi_exporter\textfile_inputs`
|
||||
|
||||
@@ -34,3 +36,21 @@ _This collector does not yet have any useful queries added, we would appreciate
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
|
||||
# Example use
|
||||
This Powershell script, when run in the `collector.textfile.directory` (default `C:\Program Files\wmi_exporter\textfile_inputs`), generates a valid `.prom` file that should be successfully ingested by wmi_exporter.
|
||||
|
||||
```Powershell
|
||||
$alpha = 42
|
||||
$beta = @{ left=3.1415; right=2.718281828; }
|
||||
|
||||
Set-Content -Path test1.prom -Encoding Ascii -NoNewline -Value ""
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# HELP test_alpha_total Some random metric.`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# TYPE test_alpha_total counter`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "test_alpha_total ${alpha}`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# HELP test_beta_bytes Some other metric.`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# TYPE test_beta_bytes gauge`n"
|
||||
foreach ($k in $beta.Keys) {
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "test_beta_bytes{spin=""${k}""} $( $beta[$k] )`n"
|
||||
}
|
||||
```
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The vmware collector exposes metrics about a VMware guest VM
|
||||
|
||||
Metric name prefix | `vmware`
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `vmware`
|
||||
Classes | `Win32_PerfRawData_vmGuestLib_VMem`, `Win32_PerfRawData_vmGuestLib_VCPU`
|
||||
Enabled by default? | No
|
||||
|
||||
|
||||
112
exporter.go
112
exporter.go
@@ -1,8 +1,9 @@
|
||||
// +build windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -22,7 +23,8 @@ import (
|
||||
|
||||
// WmiCollector implements the prometheus.Collector interface.
|
||||
type WmiCollector struct {
|
||||
collectors map[string]collector.Collector
|
||||
maxScrapeDuration time.Duration
|
||||
collectors map[string]collector.Collector
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -44,6 +46,12 @@ var (
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
)
|
||||
scrapeTimeoutDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(collector.Namespace, "exporter", "collector_timeout"),
|
||||
"wmi_exporter: Whether the collector timed out.",
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
)
|
||||
|
||||
// This can be removed when client_golang exposes this on Windows
|
||||
// (See https://github.com/prometheus/client_golang/issues/376)
|
||||
@@ -64,15 +72,48 @@ func (coll WmiCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
}
|
||||
|
||||
// Collect sends the collected metrics from each of the collectors to
|
||||
// prometheus. Collect could be called several times concurrently
|
||||
// and thus its run is protected by a single mutex.
|
||||
// prometheus.
|
||||
func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
scrapeContext, err := collector.PrepareScrapeContext()
|
||||
if err != nil {
|
||||
ch <- prometheus.NewInvalidMetric(scrapeSuccessDesc, fmt.Errorf("failed to prepare scrape: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
remainingCollectors := make(map[string]bool)
|
||||
for name := range coll.collectors {
|
||||
remainingCollectors[name] = true
|
||||
}
|
||||
|
||||
metricsBuffer := make(chan prometheus.Metric)
|
||||
allDone := make(chan struct{})
|
||||
stopped := false
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case m := <-metricsBuffer:
|
||||
if !stopped {
|
||||
ch <- m
|
||||
}
|
||||
case <-allDone:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(coll.collectors))
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(allDone)
|
||||
close(metricsBuffer)
|
||||
}()
|
||||
|
||||
for name, c := range coll.collectors {
|
||||
go func(name string, c collector.Collector) {
|
||||
execute(name, c, ch)
|
||||
execute(name, c, scrapeContext, metricsBuffer)
|
||||
wg.Done()
|
||||
delete(remainingCollectors, name)
|
||||
}(name, c)
|
||||
}
|
||||
|
||||
@@ -81,7 +122,33 @@ func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {
prometheus.CounterValue,
startTime,
)
wg.Wait()

select {
case <-allDone:
stopped = true
return
case <-time.After(coll.maxScrapeDuration):
stopped = true
remainingCollectorNames := make([]string, 0, len(remainingCollectors))
for rc := range remainingCollectors {
remainingCollectorNames = append(remainingCollectorNames, rc)
}
log.Warn("Collection timed out, still waiting for ", remainingCollectorNames)
for name := range remainingCollectors {
ch <- prometheus.MustNewConstMetric(
scrapeSuccessDesc,
prometheus.GaugeValue,
0.0,
name,
)
ch <- prometheus.MustNewConstMetric(
scrapeTimeoutDesc,
prometheus.GaugeValue,
1.0,
name,
)
}
}
}

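The hunk above replaces the plain `wg.Wait()` with a bounded wait, so one hung collector can no longer stall the whole scrape. A condensed, self-contained sketch of that pattern follows; the names are simplified, and unlike the real `Collect` it does not track which collectors are still outstanding or emit `wmi_exporter_collector_timeout` for them:

```go
// Minimal sketch of the bounded-wait pattern above:
// wait on a WaitGroup, but give up after a deadline.
package main

import (
	"fmt"
	"sync"
	"time"
)

func runWithTimeout(tasks []func(), maxDuration time.Duration) bool {
	var wg sync.WaitGroup
	wg.Add(len(tasks))

	for _, task := range tasks {
		go func(task func()) {
			defer wg.Done()
			task()
		}(task)
	}

	// Convert wg.Wait() into a channel so it can participate in a select.
	allDone := make(chan struct{})
	go func() {
		wg.Wait()
		close(allDone)
	}()

	select {
	case <-allDone:
		return true // every task finished in time
	case <-time.After(maxDuration):
		return false // at least one task is still running
	}
}

func main() {
	ok := runWithTimeout([]func(){
		func() { time.Sleep(10 * time.Millisecond) },
		func() { time.Sleep(5 * time.Second) }, // simulates a hung collector
	}, 100*time.Millisecond)
	fmt.Println("finished in time:", ok)
}
```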
func filterAvailableCollectors(collectors string) string {
|
||||
@@ -95,9 +162,9 @@ func filterAvailableCollectors(collectors string) string {
|
||||
return strings.Join(availableCollectors, ",")
|
||||
}
|
||||
|
||||
func execute(name string, c collector.Collector, ch chan<- prometheus.Metric) {
|
||||
func execute(name string, c collector.Collector, ctx *collector.ScrapeContext, ch chan<- prometheus.Metric) {
|
||||
begin := time.Now()
|
||||
err := c.Collect(ch)
|
||||
err := c.Collect(ctx, ch)
|
||||
duration := time.Since(begin)
|
||||
var success float64
|
||||
|
||||
@@ -120,6 +187,12 @@ func execute(name string, c collector.Collector, ch chan<- prometheus.Metric) {
|
||||
success,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeTimeoutDesc,
|
||||
prometheus.GaugeValue,
|
||||
0.0,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
func expandEnabledCollectors(enabled string) []string {
|
||||
@@ -191,6 +264,10 @@ func main() {
|
||||
"collectors.print",
|
||||
"If true, print available collectors and exit.",
|
||||
).Bool()
|
||||
maxScrapeDuration = kingpin.Flag(
|
||||
"scrape.max-duration",
|
||||
"Time after which collectors are aborted during a scrape",
|
||||
).Default("30s").Duration()
|
||||
)
|
||||
|
||||
log.AddFlags(kingpin.CommandLine)
|
||||
@@ -220,7 +297,12 @@ func main() {
|
||||
|
||||
stopCh := make(chan bool)
|
||||
if !isInteractive {
|
||||
go svc.Run(serviceName, &wmiExporterService{stopCh: stopCh})
|
||||
go func() {
|
||||
err = svc.Run(serviceName, &wmiExporterService{stopCh: stopCh})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to start service: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
collectors, err := loadCollectors(*enabledCollectors)
|
||||
@@ -230,8 +312,11 @@ func main() {
|
||||
|
||||
log.Infof("Enabled collectors: %v", strings.Join(keys(collectors), ", "))
|
||||
|
||||
nodeCollector := WmiCollector{collectors: collectors}
|
||||
prometheus.MustRegister(nodeCollector)
|
||||
exporter := WmiCollector{
|
||||
collectors: collectors,
|
||||
maxScrapeDuration: *maxScrapeDuration,
|
||||
}
|
||||
prometheus.MustRegister(exporter)
|
||||
|
||||
http.Handle(*metricsPath, promhttp.Handler())
|
||||
http.HandleFunc("/health", healthCheck)
|
||||
@@ -257,7 +342,10 @@ func main() {
|
||||
|
||||
func healthCheck(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
io.WriteString(w, `{"status":"ok"}`)
|
||||
_, err := fmt.Fprintln(w, `{"status":"ok"}`)
|
||||
if err != nil {
|
||||
log.Debugf("Failed to write to stream: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func keys(m map[string]collector.Collector) []string {
|
||||
|
||||
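
The hunks above change the collector call path so that execute() passes a per-scrape *collector.ScrapeContext into each collector and forwards metrics through a buffered channel that is cut off when --scrape.max-duration elapses. As a purely illustrative sketch (not code from this repository), a collector written against that shape looks roughly like the following; the exampleCollector type, its metric, and newExampleCollector are invented for the example, while Namespace and ScrapeContext are the package-level names referenced in the diff:

```go
package collector

import "github.com/prometheus/client_golang/prometheus"

// exampleCollector is a hypothetical collector used only to illustrate the
// updated Collect signature; it is not part of the exporter.
type exampleCollector struct {
	desc *prometheus.Desc
}

func newExampleCollector() *exampleCollector {
	return &exampleCollector{
		desc: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "example", "up"),
			"Illustrative metric emitted by the sketch collector.",
			nil, nil,
		),
	}
}

// Collect receives the *ScrapeContext prepared once per scrape by
// PrepareScrapeContext and forwarded by execute() in the diff above.
func (c *exampleCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	// A real collector would use ctx to query its data source here; the
	// sketch just emits a constant so the call shape stays visible.
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 1.0)
	return nil
}
```

If such a collector runs past --scrape.max-duration, the Collect loop above emits the new collector_timeout metric with value 1 for it and discards any metrics it produces later in that scrape.
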
@@ -1,3 +1,5 @@
// +build windows

package main

import (

28
gometalinter.config
Normal file
@@ -0,0 +1,28 @@
{
"Disable": [
"goconst",
"gocyclo",
"gosec",
"maligned",
"megacheck"
],
"Enable": [
"deadcode",
"errcheck",
"golint",
"gotype",
"gotypex",
"ineffassign",
"interfacer",
"structcheck",
"unconvert",
"varcheck",
"vet",
"vetshadow"
],
"Exclude": [
"don't use underscores in Go names",
"exported type .+ should have comment or be unexported",
"should be"
]
}

@@ -19,6 +19,9 @@
<Property Id="ENABLED_COLLECTORS" Secure="yes"/>
<SetProperty Id="CollectorsFlag" After="InstallFiles" Sequence="execute" Value="--collectors.enabled [ENABLED_COLLECTORS]">ENABLED_COLLECTORS</SetProperty>

<Property Id="EXTRA_FLAGS" Secure="yes"/>
<SetProperty Id="ExtraFlags" After="InstallFiles" Sequence="execute" Value="[EXTRA_FLAGS]">EXTRA_FLAGS</SetProperty>

<Property Id="LISTEN_ADDR" Secure="yes" />
<Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
<SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--telemetry.addr [LISTEN_ADDR]:[LISTEN_PORT]">LISTEN_ADDR OR LISTEN_PORT</SetProperty>
@@ -42,10 +45,11 @@
<File Id="wmi_exporter.exe" Name="wmi_exporter.exe" Source="Work\wmi_exporter.exe" KeyPath="yes">
<fw:FirewallException Id="MetricsEndpoint" Name="WMI Exporter (HTTP [LISTEN_PORT])" Description="WMI Exporter HTTP endpoint" Port="[LISTEN_PORT]" Protocol="tcp" Scope="any" IgnoreFailure="yes" />
</File>
<ServiceInstall Id="InstallExporterService" Name="wmi_exporter" DisplayName="WMI exporter" Description="Exports Prometheus metrics from WMI queries" ErrorControl="normal" Start="auto" Type="ownProcess" Arguments="--log.format logger:eventlog?name=wmi_exporter [CollectorsFlag] [ListenFlag] [MetricsPathFlag] [TextfileDirFlag]">
<ServiceInstall Id="InstallExporterService" Name="wmi_exporter" DisplayName="WMI exporter" Description="Exports Prometheus metrics from WMI queries" ErrorControl="normal" Start="auto" Type="ownProcess" Arguments="--log.format logger:eventlog?name=wmi_exporter [CollectorsFlag] [ListenFlag] [MetricsPathFlag] [TextfileDirFlag] [ExtraFlags]">
<util:ServiceConfig FirstFailureActionType="restart" SecondFailureActionType="restart" ThirdFailureActionType="restart" RestartServiceDelayInSeconds="5" />
</ServiceInstall>
<ServiceControl Id="ServiceStateControl" Name="wmi_exporter" Remove="uninstall" Start="install" Stop="both" />
<util:EventSource Log="Application" Name="wmi_exporter" EventMessageFile="%SystemRoot%\System32\EventCreate.exe" />
</Component>
<Component Id="CreateTextfileDirectory" Directory="textfile_inputs" Guid="d03ef58a-9cbf-4165-ad39-d143e9b27e14">
<CreateFolder />

@@ -1,5 +1,3 @@
// returns data points from {{ .Class }}
// <add link to documentation here> - {{ .Class }} class
package collector
import (
"github.com/StackExchange/wmi"
@@ -38,6 +36,8 @@ func (c *{{ .CollectorName }}Collector) Collect(ch chan<- prometheus.Metric) err
}
return nil
}
// {{ .Class }} docs:
// - <add link to documentation here>
type {{ .Class }} struct {
Name string
{{ range $m := .Members }}

@@ -25,7 +25,7 @@ func main() {
panic(err)
}
var data TemplateData
if err := json.Unmarshal(bytes, &data); err != nil {
if err = json.Unmarshal(bytes, &data); err != nil {
panic(err)
}

@@ -33,11 +33,11 @@ func main() {
"toLower": strings.ToLower,
"toSnakeCase": toSnakeCase,
}
template, err := template.New("template").Funcs(funcs).ParseFiles("collector.template")
tmpl, err := template.New("template").Funcs(funcs).ParseFiles("collector.template")
if err != nil {
panic(err)
}
err = template.ExecuteTemplate(os.Stdout, "collector.template", data)
err = tmpl.ExecuteTemplate(os.Stdout, "collector.template", data)
if err != nil {
panic(err)
}

1
vendor/github.com/Microsoft/go-winio/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
*.exe

22
vendor/github.com/Microsoft/go-winio/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

22
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
Normal file
@@ -0,0 +1,22 @@
# go-winio

This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
for using named pipes as a net transport.

This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
newer operating systems. This is similar to the implementation of network sockets in Go's net
package.

Please see the LICENSE file for licensing information.

This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.

Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
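
The README above describes go-winio's core use case: exposing Windows named pipes through the standard net.Listener/net.Conn interfaces. A minimal, hypothetical sketch of that usage follows; the pipe name and the message are invented for the example, and only ListenPipe and DialPipe come from the library:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Hypothetical pipe name, used only for this sketch.
	const pipe = `\\.\pipe\go-winio-demo`

	// ListenPipe returns a net.Listener backed by a Windows named pipe.
	l, err := winio.ListenPipe(pipe, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Serve a single message to the first client that connects.
	go func() {
		conn, err := l.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		conn.Write([]byte("hello over a named pipe"))
	}()

	// DialPipe returns a net.Conn; a nil timeout falls back to the
	// library's default connect timeout.
	conn, err := winio.DialPipe(pipe, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	msg, _ := ioutil.ReadAll(conn)
	fmt.Println(string(msg))
}
```

Because the connections satisfy net.Conn, code written against the net package can be pointed at a named pipe with no other changes; as noted in the README, this only works on Windows Vista and newer.
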
27
vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

344
vendor/github.com/Microsoft/go-winio/archive/tar/common.go
generated
vendored
Normal file
@@ -0,0 +1,344 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tar implements access to tar archives.
|
||||
// It aims to cover most of the variations, including those produced
|
||||
// by GNU and BSD tars.
|
||||
//
|
||||
// References:
|
||||
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
|
||||
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
|
||||
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
blockSize = 512
|
||||
|
||||
// Types
|
||||
TypeReg = '0' // regular file
|
||||
TypeRegA = '\x00' // regular file
|
||||
TypeLink = '1' // hard link
|
||||
TypeSymlink = '2' // symbolic link
|
||||
TypeChar = '3' // character device node
|
||||
TypeBlock = '4' // block device node
|
||||
TypeDir = '5' // directory
|
||||
TypeFifo = '6' // fifo node
|
||||
TypeCont = '7' // reserved
|
||||
TypeXHeader = 'x' // extended header
|
||||
TypeXGlobalHeader = 'g' // global extended header
|
||||
TypeGNULongName = 'L' // Next file has a long name
|
||||
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
|
||||
TypeGNUSparse = 'S' // sparse file
|
||||
)
|
||||
|
||||
// A Header represents a single header in a tar archive.
|
||||
// Some fields may not be populated.
|
||||
type Header struct {
|
||||
Name string // name of header file entry
|
||||
Mode int64 // permission and mode bits
|
||||
Uid int // user id of owner
|
||||
Gid int // group id of owner
|
||||
Size int64 // length in bytes
|
||||
ModTime time.Time // modified time
|
||||
Typeflag byte // type of header entry
|
||||
Linkname string // target name of link
|
||||
Uname string // user name of owner
|
||||
Gname string // group name of owner
|
||||
Devmajor int64 // major number of character or block device
|
||||
Devminor int64 // minor number of character or block device
|
||||
AccessTime time.Time // access time
|
||||
ChangeTime time.Time // status change time
|
||||
CreationTime time.Time // creation time
|
||||
Xattrs map[string]string
|
||||
Winheaders map[string]string
|
||||
}
|
||||
|
||||
// File name constants from the tar spec.
|
||||
const (
|
||||
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
|
||||
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
|
||||
)
|
||||
|
||||
// FileInfo returns an os.FileInfo for the Header.
|
||||
func (h *Header) FileInfo() os.FileInfo {
|
||||
return headerFileInfo{h}
|
||||
}
|
||||
|
||||
// headerFileInfo implements os.FileInfo.
|
||||
type headerFileInfo struct {
|
||||
h *Header
|
||||
}
|
||||
|
||||
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
|
||||
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
|
||||
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
|
||||
func (fi headerFileInfo) Sys() interface{} { return fi.h }
|
||||
|
||||
// Name returns the base name of the file.
|
||||
func (fi headerFileInfo) Name() string {
|
||||
if fi.IsDir() {
|
||||
return path.Base(path.Clean(fi.h.Name))
|
||||
}
|
||||
return path.Base(fi.h.Name)
|
||||
}
|
||||
|
||||
// Mode returns the permission and mode bits for the headerFileInfo.
|
||||
func (fi headerFileInfo) Mode() (mode os.FileMode) {
|
||||
// Set file permission bits.
|
||||
mode = os.FileMode(fi.h.Mode).Perm()
|
||||
|
||||
// Set setuid, setgid and sticky bits.
|
||||
if fi.h.Mode&c_ISUID != 0 {
|
||||
// setuid
|
||||
mode |= os.ModeSetuid
|
||||
}
|
||||
if fi.h.Mode&c_ISGID != 0 {
|
||||
// setgid
|
||||
mode |= os.ModeSetgid
|
||||
}
|
||||
if fi.h.Mode&c_ISVTX != 0 {
|
||||
// sticky
|
||||
mode |= os.ModeSticky
|
||||
}
|
||||
|
||||
// Set file mode bits.
|
||||
// clear perm, setuid, setgid and sticky bits.
|
||||
m := os.FileMode(fi.h.Mode) &^ 07777
|
||||
if m == c_ISDIR {
|
||||
// directory
|
||||
mode |= os.ModeDir
|
||||
}
|
||||
if m == c_ISFIFO {
|
||||
// named pipe (FIFO)
|
||||
mode |= os.ModeNamedPipe
|
||||
}
|
||||
if m == c_ISLNK {
|
||||
// symbolic link
|
||||
mode |= os.ModeSymlink
|
||||
}
|
||||
if m == c_ISBLK {
|
||||
// device file
|
||||
mode |= os.ModeDevice
|
||||
}
|
||||
if m == c_ISCHR {
|
||||
// Unix character device
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
}
|
||||
if m == c_ISSOCK {
|
||||
// Unix domain socket
|
||||
mode |= os.ModeSocket
|
||||
}
|
||||
|
||||
switch fi.h.Typeflag {
|
||||
case TypeSymlink:
|
||||
// symbolic link
|
||||
mode |= os.ModeSymlink
|
||||
case TypeChar:
|
||||
// character device node
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
case TypeBlock:
|
||||
// block device node
|
||||
mode |= os.ModeDevice
|
||||
case TypeDir:
|
||||
// directory
|
||||
mode |= os.ModeDir
|
||||
case TypeFifo:
|
||||
// fifo node
|
||||
mode |= os.ModeNamedPipe
|
||||
}
|
||||
|
||||
return mode
|
||||
}
|
||||
|
||||
// sysStat, if non-nil, populates h from system-dependent fields of fi.
|
||||
var sysStat func(fi os.FileInfo, h *Header) error
|
||||
|
||||
// Mode constants from the tar spec.
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
// Keywords for the PAX Extended Header
|
||||
const (
|
||||
paxAtime = "atime"
|
||||
paxCharset = "charset"
|
||||
paxComment = "comment"
|
||||
paxCtime = "ctime" // please note that ctime is not a valid pax header.
|
||||
paxCreationTime = "LIBARCHIVE.creationtime"
|
||||
paxGid = "gid"
|
||||
paxGname = "gname"
|
||||
paxLinkpath = "linkpath"
|
||||
paxMtime = "mtime"
|
||||
paxPath = "path"
|
||||
paxSize = "size"
|
||||
paxUid = "uid"
|
||||
paxUname = "uname"
|
||||
paxXattr = "SCHILY.xattr."
|
||||
paxWindows = "MSWINDOWS."
|
||||
paxNone = ""
|
||||
)
|
||||
|
||||
// FileInfoHeader creates a partially-populated Header from fi.
|
||||
// If fi describes a symlink, FileInfoHeader records link as the link target.
|
||||
// If fi describes a directory, a slash is appended to the name.
|
||||
// Because os.FileInfo's Name method returns only the base name of
|
||||
// the file it describes, it may be necessary to modify the Name field
|
||||
// of the returned header to provide the full path name of the file.
|
||||
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
|
||||
if fi == nil {
|
||||
return nil, errors.New("tar: FileInfo is nil")
|
||||
}
|
||||
fm := fi.Mode()
|
||||
h := &Header{
|
||||
Name: fi.Name(),
|
||||
ModTime: fi.ModTime(),
|
||||
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
|
||||
}
|
||||
switch {
|
||||
case fm.IsRegular():
|
||||
h.Mode |= c_ISREG
|
||||
h.Typeflag = TypeReg
|
||||
h.Size = fi.Size()
|
||||
case fi.IsDir():
|
||||
h.Typeflag = TypeDir
|
||||
h.Mode |= c_ISDIR
|
||||
h.Name += "/"
|
||||
case fm&os.ModeSymlink != 0:
|
||||
h.Typeflag = TypeSymlink
|
||||
h.Mode |= c_ISLNK
|
||||
h.Linkname = link
|
||||
case fm&os.ModeDevice != 0:
|
||||
if fm&os.ModeCharDevice != 0 {
|
||||
h.Mode |= c_ISCHR
|
||||
h.Typeflag = TypeChar
|
||||
} else {
|
||||
h.Mode |= c_ISBLK
|
||||
h.Typeflag = TypeBlock
|
||||
}
|
||||
case fm&os.ModeNamedPipe != 0:
|
||||
h.Typeflag = TypeFifo
|
||||
h.Mode |= c_ISFIFO
|
||||
case fm&os.ModeSocket != 0:
|
||||
h.Mode |= c_ISSOCK
|
||||
default:
|
||||
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
|
||||
}
|
||||
if fm&os.ModeSetuid != 0 {
|
||||
h.Mode |= c_ISUID
|
||||
}
|
||||
if fm&os.ModeSetgid != 0 {
|
||||
h.Mode |= c_ISGID
|
||||
}
|
||||
if fm&os.ModeSticky != 0 {
|
||||
h.Mode |= c_ISVTX
|
||||
}
|
||||
// If possible, populate additional fields from OS-specific
|
||||
// FileInfo fields.
|
||||
if sys, ok := fi.Sys().(*Header); ok {
|
||||
// This FileInfo came from a Header (not the OS). Use the
|
||||
// original Header to populate all remaining fields.
|
||||
h.Uid = sys.Uid
|
||||
h.Gid = sys.Gid
|
||||
h.Uname = sys.Uname
|
||||
h.Gname = sys.Gname
|
||||
h.AccessTime = sys.AccessTime
|
||||
h.ChangeTime = sys.ChangeTime
|
||||
if sys.Xattrs != nil {
|
||||
h.Xattrs = make(map[string]string)
|
||||
for k, v := range sys.Xattrs {
|
||||
h.Xattrs[k] = v
|
||||
}
|
||||
}
|
||||
if sys.Typeflag == TypeLink {
|
||||
// hard link
|
||||
h.Typeflag = TypeLink
|
||||
h.Size = 0
|
||||
h.Linkname = sys.Linkname
|
||||
}
|
||||
}
|
||||
if sysStat != nil {
|
||||
return h, sysStat(fi, h)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
var zeroBlock = make([]byte, blockSize)
|
||||
|
||||
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
|
||||
// We compute and return both.
|
||||
func checksum(header []byte) (unsigned int64, signed int64) {
|
||||
for i := 0; i < len(header); i++ {
|
||||
if i == 148 {
|
||||
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
|
||||
unsigned += ' ' * 8
|
||||
signed += ' ' * 8
|
||||
i += 7
|
||||
continue
|
||||
}
|
||||
unsigned += int64(header[i])
|
||||
signed += int64(int8(header[i]))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type slicer []byte
|
||||
|
||||
func (sp *slicer) next(n int) (b []byte) {
|
||||
s := *sp
|
||||
b, *sp = s[0:n], s[n:]
|
||||
return
|
||||
}
|
||||
|
||||
func isASCII(s string) bool {
|
||||
for _, c := range s {
|
||||
if c >= 0x80 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toASCII(s string) string {
|
||||
if isASCII(s) {
|
||||
return s
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
for _, c := range s {
|
||||
if c < 0x80 {
|
||||
buf.WriteByte(byte(c))
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// isHeaderOnlyType checks if the given type flag is of the type that has no
|
||||
// data section even if a size is specified.
|
||||
func isHeaderOnlyType(flag byte) bool {
|
||||
switch flag {
|
||||
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
80
vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar_test
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
// Create a buffer to write our archive to.
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
// Create a new tar archive.
|
||||
tw := tar.NewWriter(buf)
|
||||
|
||||
// Add some files to the archive.
|
||||
var files = []struct {
|
||||
Name, Body string
|
||||
}{
|
||||
{"readme.txt", "This archive contains some text files."},
|
||||
{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
|
||||
{"todo.txt", "Get animal handling license."},
|
||||
}
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.Name,
|
||||
Mode: 0600,
|
||||
Size: int64(len(file.Body)),
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
if _, err := tw.Write([]byte(file.Body)); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
// Make sure to check the error on Close.
|
||||
if err := tw.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// Open the tar archive for reading.
|
||||
r := bytes.NewReader(buf.Bytes())
|
||||
tr := tar.NewReader(r)
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
// end of tar archive
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
fmt.Printf("Contents of %s:\n", hdr.Name)
|
||||
if _, err := io.Copy(os.Stdout, tr); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Contents of readme.txt:
|
||||
// This archive contains some text files.
|
||||
// Contents of gopher.txt:
|
||||
// Gopher names:
|
||||
// George
|
||||
// Geoffrey
|
||||
// Gonzo
|
||||
// Contents of todo.txt:
|
||||
// Get animal handling license.
|
||||
}
|
||||
1002
vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
generated
vendored
Normal file
File diff suppressed because it is too large
1125
vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
20
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux dragonfly openbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atim.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctim.Unix())
|
||||
}
|
||||
20
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd netbsd
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atimespec.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctimespec.Unix())
|
||||
}
|
||||
32
vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func init() {
|
||||
sysStat = statUnix
|
||||
}
|
||||
|
||||
func statUnix(fi os.FileInfo, h *Header) error {
|
||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
h.Uid = int(sys.Uid)
|
||||
h.Gid = int(sys.Gid)
|
||||
// TODO(bradfitz): populate username & group. os/user
|
||||
// doesn't cache LookupId lookups, and lacks group
|
||||
// lookup functions.
|
||||
h.AccessTime = statAtime(sys)
|
||||
h.ChangeTime = statCtime(sys)
|
||||
// TODO(bradfitz): major/minor device numbers?
|
||||
return nil
|
||||
}
|
||||
325
vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go
generated
vendored
Normal file
@@ -0,0 +1,325 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestFileInfoHeader(t *testing.T) {
|
||||
fi, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
h, err := FileInfoHeader(fi, "")
|
||||
if err != nil {
|
||||
t.Fatalf("FileInfoHeader: %v", err)
|
||||
}
|
||||
if g, e := h.Name, "small.txt"; g != e {
|
||||
t.Errorf("Name = %q; want %q", g, e)
|
||||
}
|
||||
if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
|
||||
t.Errorf("Mode = %#o; want %#o", g, e)
|
||||
}
|
||||
if g, e := h.Size, int64(5); g != e {
|
||||
t.Errorf("Size = %v; want %v", g, e)
|
||||
}
|
||||
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
||||
t.Errorf("ModTime = %v; want %v", g, e)
|
||||
}
|
||||
// FileInfoHeader should error when passing nil FileInfo
|
||||
if _, err := FileInfoHeader(nil, ""); err == nil {
|
||||
t.Fatalf("Expected error when passing nil to FileInfoHeader")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileInfoHeaderDir(t *testing.T) {
|
||||
fi, err := os.Stat("testdata")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
h, err := FileInfoHeader(fi, "")
|
||||
if err != nil {
|
||||
t.Fatalf("FileInfoHeader: %v", err)
|
||||
}
|
||||
if g, e := h.Name, "testdata/"; g != e {
|
||||
t.Errorf("Name = %q; want %q", g, e)
|
||||
}
|
||||
// Ignoring c_ISGID for golang.org/issue/4867
|
||||
if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
|
||||
t.Errorf("Mode = %#o; want %#o", g, e)
|
||||
}
|
||||
if g, e := h.Size, int64(0); g != e {
|
||||
t.Errorf("Size = %v; want %v", g, e)
|
||||
}
|
||||
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
||||
t.Errorf("ModTime = %v; want %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileInfoHeaderSymlink(t *testing.T) {
|
||||
h, err := FileInfoHeader(symlink{}, "some-target")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if g, e := h.Name, "some-symlink"; g != e {
|
||||
t.Errorf("Name = %q; want %q", g, e)
|
||||
}
|
||||
if g, e := h.Linkname, "some-target"; g != e {
|
||||
t.Errorf("Linkname = %q; want %q", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
type symlink struct{}
|
||||
|
||||
func (symlink) Name() string { return "some-symlink" }
|
||||
func (symlink) Size() int64 { return 0 }
|
||||
func (symlink) Mode() os.FileMode { return os.ModeSymlink }
|
||||
func (symlink) ModTime() time.Time { return time.Time{} }
|
||||
func (symlink) IsDir() bool { return false }
|
||||
func (symlink) Sys() interface{} { return nil }
|
||||
|
||||
func TestRoundTrip(t *testing.T) {
|
||||
data := []byte("some file contents")
|
||||
|
||||
var b bytes.Buffer
|
||||
tw := NewWriter(&b)
|
||||
hdr := &Header{
|
||||
Name: "file.txt",
|
||||
Uid: 1 << 21, // too big for 8 octal digits
|
||||
Size: int64(len(data)),
|
||||
ModTime: time.Now(),
|
||||
}
|
||||
// tar only supports second precision.
|
||||
hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
t.Fatalf("tw.WriteHeader: %v", err)
|
||||
}
|
||||
if _, err := tw.Write(data); err != nil {
|
||||
t.Fatalf("tw.Write: %v", err)
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Fatalf("tw.Close: %v", err)
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
tr := NewReader(&b)
|
||||
rHdr, err := tr.Next()
|
||||
if err != nil {
|
||||
t.Fatalf("tr.Next: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(rHdr, hdr) {
|
||||
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
|
||||
}
|
||||
rData, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatalf("Read: %v", err)
|
||||
}
|
||||
if !bytes.Equal(rData, data) {
|
||||
t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
|
||||
}
|
||||
}
|
||||
|
||||
type headerRoundTripTest struct {
|
||||
h *Header
|
||||
fm os.FileMode
|
||||
}
|
||||
|
||||
func TestHeaderRoundTrip(t *testing.T) {
|
||||
golden := []headerRoundTripTest{
|
||||
// regular file.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "test.txt",
|
||||
Mode: 0644 | c_ISREG,
|
||||
Size: 12,
|
||||
ModTime: time.Unix(1360600916, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0644,
|
||||
},
|
||||
// symbolic link.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "link.txt",
|
||||
Mode: 0777 | c_ISLNK,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360600852, 0),
|
||||
Typeflag: TypeSymlink,
|
||||
},
|
||||
fm: 0777 | os.ModeSymlink,
|
||||
},
|
||||
// character device node.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dev/null",
|
||||
Mode: 0666 | c_ISCHR,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360578951, 0),
|
||||
Typeflag: TypeChar,
|
||||
},
|
||||
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
|
||||
},
|
||||
// block device node.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dev/sda",
|
||||
Mode: 0660 | c_ISBLK,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360578954, 0),
|
||||
Typeflag: TypeBlock,
|
||||
},
|
||||
fm: 0660 | os.ModeDevice,
|
||||
},
|
||||
// directory.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dir/",
|
||||
Mode: 0755 | c_ISDIR,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360601116, 0),
|
||||
Typeflag: TypeDir,
|
||||
},
|
||||
fm: 0755 | os.ModeDir,
|
||||
},
|
||||
// fifo node.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dev/initctl",
|
||||
Mode: 0600 | c_ISFIFO,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360578949, 0),
|
||||
Typeflag: TypeFifo,
|
||||
},
|
||||
fm: 0600 | os.ModeNamedPipe,
|
||||
},
|
||||
// setuid.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "bin/su",
|
||||
Mode: 0755 | c_ISREG | c_ISUID,
|
||||
Size: 23232,
|
||||
ModTime: time.Unix(1355405093, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0755 | os.ModeSetuid,
|
||||
},
|
||||
// setguid.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "group.txt",
|
||||
Mode: 0750 | c_ISREG | c_ISGID,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360602346, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0750 | os.ModeSetgid,
|
||||
},
|
||||
// sticky.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "sticky.txt",
|
||||
Mode: 0600 | c_ISREG | c_ISVTX,
|
||||
Size: 7,
|
||||
ModTime: time.Unix(1360602540, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0600 | os.ModeSticky,
|
||||
},
|
||||
// hard link.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "hard.txt",
|
||||
Mode: 0644 | c_ISREG,
|
||||
Size: 0,
|
||||
Linkname: "file.txt",
|
||||
ModTime: time.Unix(1360600916, 0),
|
||||
Typeflag: TypeLink,
|
||||
},
|
||||
fm: 0644,
|
||||
},
|
||||
// More information.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "info.txt",
|
||||
Mode: 0600 | c_ISREG,
|
||||
Size: 0,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
ModTime: time.Unix(1360602540, 0),
|
||||
Uname: "slartibartfast",
|
||||
Gname: "users",
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0600,
|
||||
},
|
||||
}
|
||||
|
||||
for i, g := range golden {
|
||||
fi := g.h.FileInfo()
|
||||
h2, err := FileInfoHeader(fi, "")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(fi.Name(), "/") {
|
||||
t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
|
||||
}
|
||||
name := path.Base(g.h.Name)
|
||||
if fi.IsDir() {
|
||||
name += "/"
|
||||
}
|
||||
if got, want := h2.Name, name; got != want {
|
||||
t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Size, g.h.Size; got != want {
|
||||
t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Uid, g.h.Uid; got != want {
|
||||
t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
|
||||
}
|
||||
if got, want := h2.Gid, g.h.Gid; got != want {
|
||||
t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
|
||||
}
|
||||
if got, want := h2.Uname, g.h.Uname; got != want {
|
||||
t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
|
||||
}
|
||||
if got, want := h2.Gname, g.h.Gname; got != want {
|
||||
t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
|
||||
}
|
||||
if got, want := h2.Linkname, g.h.Linkname; got != want {
|
||||
t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Typeflag, g.h.Typeflag; got != want {
|
||||
t.Logf("%#v %#v", g.h, fi.Sys())
|
||||
t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
|
||||
}
|
||||
if got, want := h2.Mode, g.h.Mode; got != want {
|
||||
t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
|
||||
}
|
||||
if got, want := fi.Mode(), g.fm; got != want {
|
||||
t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
|
||||
}
|
||||
if got, want := h2.AccessTime, g.h.AccessTime; got != want {
|
||||
t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.ChangeTime, g.h.ChangeTime; got != want {
|
||||
t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.ModTime, g.h.ModTime; got != want {
|
||||
t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
|
||||
}
|
||||
if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
|
||||
t.Errorf("i=%d: Sys didn't return original *Header", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
444
vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
generated
vendored
Normal file
@@ -0,0 +1,444 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
// TODO(dsymonds):
|
||||
// - catch more errors (no first header, etc.)
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrWriteTooLong = errors.New("archive/tar: write too long")
|
||||
ErrFieldTooLong = errors.New("archive/tar: header field too long")
|
||||
ErrWriteAfterClose = errors.New("archive/tar: write after close")
|
||||
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
|
||||
)
|
||||
|
||||
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
|
||||
// A tar archive consists of a sequence of files.
|
||||
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
|
||||
// writing at most hdr.Size bytes in total.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
err error
|
||||
nb int64 // number of unwritten bytes for current file entry
|
||||
pad int64 // amount of padding to write after current file entry
|
||||
closed bool
|
||||
usedBinary bool // whether the binary numeric field extension was used
|
||||
preferPax bool // use pax header instead of binary numeric header
|
||||
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
|
||||
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
|
||||
}
|
||||
|
||||
type formatter struct {
|
||||
err error // Last error seen
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer writing to w.
|
||||
func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
|
||||
|
||||
// Flush finishes writing the current file (optional).
|
||||
func (tw *Writer) Flush() error {
|
||||
if tw.nb > 0 {
|
||||
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
n := tw.nb + tw.pad
|
||||
for n > 0 && tw.err == nil {
|
||||
nr := n
|
||||
if nr > blockSize {
|
||||
nr = blockSize
|
||||
}
|
||||
var nw int
|
||||
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
|
||||
n -= int64(nw)
|
||||
}
|
||||
tw.nb = 0
|
||||
tw.pad = 0
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// Write s into b, terminating it with a NUL if there is room.
|
||||
func (f *formatter) formatString(b []byte, s string) {
|
||||
if len(s) > len(b) {
|
||||
f.err = ErrFieldTooLong
|
||||
return
|
||||
}
|
||||
ascii := toASCII(s)
|
||||
copy(b, ascii)
|
||||
if len(ascii) < len(b) {
|
||||
b[len(ascii)] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Encode x as an octal ASCII string and write it into b with leading zeros.
|
||||
func (f *formatter) formatOctal(b []byte, x int64) {
|
||||
s := strconv.FormatInt(x, 8)
|
||||
// leading zeros, but leave room for a NUL.
|
||||
for len(s)+1 < len(b) {
|
||||
s = "0" + s
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
|
||||
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
|
||||
// encoding. Unlike octal encoding, base-256 encoding does not require that the
|
||||
// string ends with a NUL character. Thus, all n bytes are available for output.
|
||||
//
|
||||
// If operating in binary mode, this assumes strict GNU binary mode; which means
|
||||
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
|
||||
// equivalent to the sign bit in two's complement form.
|
||||
func fitsInBase256(n int, x int64) bool {
|
||||
var binBits = uint(n-1) * 8
|
||||
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
|
||||
}
|
||||
|
||||
// Write x into b, as binary (GNUtar/star extension).
|
||||
func (f *formatter) formatNumeric(b []byte, x int64) {
|
||||
if fitsInBase256(len(b), x) {
|
||||
for i := len(b) - 1; i >= 0; i-- {
|
||||
b[i] = byte(x)
|
||||
x >>= 8
|
||||
}
|
||||
b[0] |= 0x80 // Highest bit indicates binary format
|
||||
return
|
||||
}
|
||||
|
||||
f.formatOctal(b, 0) // Last resort, just write zero
|
||||
f.err = ErrFieldTooLong
|
||||
}
|
||||
|
||||
var (
|
||||
minTime = time.Unix(0, 0)
|
||||
// There is room for 11 octal digits (33 bits) of mtime.
|
||||
maxTime = minTime.Add((1<<33 - 1) * time.Second)
|
||||
)
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
func (tw *Writer) WriteHeader(hdr *Header) error {
|
||||
return tw.writeHeader(hdr, true)
|
||||
}
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
// As this method is called internally by writePax header to allow it to
|
||||
// suppress writing the pax header.
|
||||
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
|
||||
if tw.closed {
|
||||
return ErrWriteAfterClose
|
||||
}
|
||||
if tw.err == nil {
|
||||
tw.Flush()
|
||||
}
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// a map to hold pax header records, if any are needed
|
||||
paxHeaders := make(map[string]string)
|
||||
|
||||
// TODO(shanemhansen): we might want to use PAX headers for
|
||||
// subsecond time resolution, but for now let's just capture
|
||||
// too long fields or non ascii characters
|
||||
|
||||
var f formatter
|
||||
var header []byte
|
||||
|
||||
// We need to select which scratch buffer to use carefully,
|
||||
// since this method is called recursively to write PAX headers.
|
||||
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
|
||||
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
|
||||
// already being used by the non-recursive call, so we must use paxHdrBuff.
|
||||
header = tw.hdrBuff[:]
|
||||
if !allowPax {
|
||||
header = tw.paxHdrBuff[:]
|
||||
}
|
||||
copy(header, zeroBlock)
|
||||
s := slicer(header)
|
||||
|
||||
// Wrappers around formatter that automatically sets paxHeaders if the
|
||||
// argument extends beyond the capacity of the input byte slice.
|
||||
var formatString = func(b []byte, s string, paxKeyword string) {
|
||||
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
|
||||
if needsPaxHeader {
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
|
||||
// Try octal first.
|
||||
s := strconv.FormatInt(x, 8)
|
||||
if len(s) < len(b) {
|
||||
f.formatOctal(b, x)
|
||||
return
|
||||
}
|
||||
|
||||
// If it is too long for octal, and PAX is preferred, use a PAX header.
|
||||
if paxKeyword != paxNone && tw.preferPax {
|
||||
f.formatOctal(b, 0)
|
||||
s := strconv.FormatInt(x, 10)
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
|
||||
tw.usedBinary = true
|
||||
f.formatNumeric(b, x)
|
||||
}
|
||||
var formatTime = func(b []byte, t time.Time, paxKeyword string) {
|
||||
var unixTime int64
|
||||
if !t.Before(minTime) && !t.After(maxTime) {
|
||||
unixTime = t.Unix()
|
||||
}
|
||||
formatNumeric(b, unixTime, paxNone)
|
||||
|
||||
// Write a PAX header if the time didn't fit precisely.
|
||||
if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
|
||||
paxHeaders[paxKeyword] = formatPAXTime(t)
|
||||
}
|
||||
}
|
||||
|
||||
// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
||||
pathHeaderBytes := s.next(fileNameSize)
|
||||
|
||||
formatString(pathHeaderBytes, hdr.Name, paxPath)
|
||||
|
||||
f.formatOctal(s.next(8), hdr.Mode) // 100:108
|
||||
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
|
||||
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
|
||||
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
|
||||
formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148
|
||||
s.next(8) // chksum (148:156)
|
||||
s.next(1)[0] = hdr.Typeflag // 156:157
|
||||
|
||||
formatString(s.next(100), hdr.Linkname, paxLinkpath)
|
||||
|
||||
copy(s.next(8), []byte("ustar\x0000")) // 257:265
|
||||
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
|
||||
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
|
||||
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
|
||||
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
|
||||
|
||||
// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
||||
prefixHeaderBytes := s.next(155)
|
||||
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
|
||||
|
||||
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
|
||||
if tw.usedBinary {
|
||||
copy(header[257:265], []byte("ustar \x00"))
|
||||
}
|
||||
|
||||
_, paxPathUsed := paxHeaders[paxPath]
|
||||
// try to use a ustar header when only the name is too long
|
||||
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
|
||||
prefix, suffix, ok := splitUSTARPath(hdr.Name)
|
||||
if ok {
|
||||
// Since we can encode in USTAR format, disable PAX header.
|
||||
delete(paxHeaders, paxPath)
|
||||
|
||||
// Update the path fields
|
||||
formatString(pathHeaderBytes, suffix, paxNone)
|
||||
formatString(prefixHeaderBytes, prefix, paxNone)
|
||||
}
|
||||
}
|
||||
|
||||
// The chksum field is terminated by a NUL and a space.
|
||||
// This is different from the other octal fields.
|
||||
chksum, _ := checksum(header)
|
||||
f.formatOctal(header[148:155], chksum) // Never fails
|
||||
header[155] = ' '
|
||||
|
||||
// Check if there were any formatting errors.
|
||||
if f.err != nil {
|
||||
tw.err = f.err
|
||||
return tw.err
|
||||
}
|
||||
|
||||
if allowPax {
|
||||
if !hdr.AccessTime.IsZero() {
|
||||
paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
|
||||
}
|
||||
if !hdr.ChangeTime.IsZero() {
|
||||
paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
|
||||
}
|
||||
if !hdr.CreationTime.IsZero() {
|
||||
paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
|
||||
}
|
||||
for k, v := range hdr.Xattrs {
|
||||
paxHeaders[paxXattr+k] = v
|
||||
}
|
||||
for k, v := range hdr.Winheaders {
|
||||
paxHeaders[paxWindows+k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(paxHeaders) > 0 {
|
||||
if !allowPax {
|
||||
return errInvalidHeader
|
||||
}
|
||||
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
tw.nb = int64(hdr.Size)
|
||||
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
|
||||
|
||||
_, tw.err = tw.w.Write(header)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
func formatPAXTime(t time.Time) string {
|
||||
sec := t.Unix()
|
||||
usec := t.Nanosecond()
|
||||
s := strconv.FormatInt(sec, 10)
|
||||
if usec != 0 {
|
||||
s = fmt.Sprintf("%s.%09d", s, usec)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
|
||||
// If the path is not splittable, then it will return ("", "", false).
|
||||
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
|
||||
length := len(name)
|
||||
if length <= fileNameSize || !isASCII(name) {
|
||||
return "", "", false
|
||||
} else if length > fileNamePrefixSize+1 {
|
||||
length = fileNamePrefixSize + 1
|
||||
} else if name[length-1] == '/' {
|
||||
length--
|
||||
}
|
||||
|
||||
i := strings.LastIndex(name[:length], "/")
|
||||
nlen := len(name) - i - 1 // nlen is length of suffix
|
||||
plen := i // plen is length of prefix
|
||||
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
|
||||
return "", "", false
|
||||
}
|
||||
return name[:i], name[i+1:], true
|
||||
}
|
||||
|
||||
// writePaxHeader writes an extended pax header to the
|
||||
// archive.
|
||||
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
|
||||
// Prepare extended header
|
||||
ext := new(Header)
|
||||
ext.Typeflag = TypeXHeader
|
||||
// Setting ModTime is required for reader parsing to
|
||||
// succeed, and seems harmless enough.
|
||||
ext.ModTime = hdr.ModTime
|
||||
// The spec asks that we namespace our pseudo files
|
||||
// with the current pid. However, this results in differing outputs
|
||||
// for identical inputs. As such, the constant 0 is now used instead.
|
||||
// golang.org/issue/12358
|
||||
dir, file := path.Split(hdr.Name)
|
||||
fullName := path.Join(dir, "PaxHeaders.0", file)
|
||||
|
||||
ascii := toASCII(fullName)
|
||||
if len(ascii) > 100 {
|
||||
ascii = ascii[:100]
|
||||
}
|
||||
ext.Name = ascii
|
||||
// Construct the body
|
||||
var buf bytes.Buffer
|
||||
|
||||
// Keys are sorted before writing to body to allow deterministic output.
|
||||
var keys []string
|
||||
for k := range paxHeaders {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
|
||||
}
|
||||
|
||||
ext.Size = int64(len(buf.Bytes()))
|
||||
if err := tw.writeHeader(ext, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := tw.Write(buf.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// formatPAXRecord formats a single PAX record, prefixing it with the
|
||||
// appropriate length.
|
||||
func formatPAXRecord(k, v string) string {
|
||||
const padding = 3 // Extra padding for ' ', '=', and '\n'
|
||||
size := len(k) + len(v) + padding
|
||||
size += len(strconv.Itoa(size))
|
||||
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
|
||||
|
||||
// Final adjustment if adding size field increased the record size.
|
||||
if len(record) != size {
|
||||
size = len(record)
|
||||
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
|
||||
}
|
||||
return record
|
||||
}
|
||||
|
||||
// Write writes to the current entry in the tar archive.
|
||||
// Write returns the error ErrWriteTooLong if more than
|
||||
// hdr.Size bytes are written after WriteHeader.
|
||||
func (tw *Writer) Write(b []byte) (n int, err error) {
|
||||
if tw.closed {
|
||||
err = ErrWriteAfterClose
|
||||
return
|
||||
}
|
||||
overwrite := false
|
||||
if int64(len(b)) > tw.nb {
|
||||
b = b[0:tw.nb]
|
||||
overwrite = true
|
||||
}
|
||||
n, err = tw.w.Write(b)
|
||||
tw.nb -= int64(n)
|
||||
if err == nil && overwrite {
|
||||
err = ErrWriteTooLong
|
||||
return
|
||||
}
|
||||
tw.err = err
|
||||
return
|
||||
}
|
||||
|
||||
// Close closes the tar archive, flushing any unwritten
|
||||
// data to the underlying writer.
|
||||
func (tw *Writer) Close() error {
|
||||
if tw.err != nil || tw.closed {
|
||||
return tw.err
|
||||
}
|
||||
tw.Flush()
|
||||
tw.closed = true
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// trailer: two zero blocks
|
||||
for i := 0; i < 2; i++ {
|
||||
_, tw.err = tw.w.Write(zeroBlock)
|
||||
if tw.err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return tw.err
|
||||
}
|
||||
739
vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go
generated
vendored
Normal file
@@ -0,0 +1,739 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/iotest"
|
||||
"time"
|
||||
)
|
||||
|
||||
type writerTestEntry struct {
|
||||
header *Header
|
||||
contents string
|
||||
}
|
||||
|
||||
type writerTest struct {
|
||||
file string // filename of expected output
|
||||
entries []*writerTestEntry
|
||||
}
|
||||
|
||||
var writerTests = []*writerTest{
|
||||
// The writer test file was produced with this command:
|
||||
// tar (GNU tar) 1.26
|
||||
// ln -s small.txt link.txt
|
||||
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
|
||||
{
|
||||
file: "testdata/writer.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: "small.txt",
|
||||
Mode: 0640,
|
||||
Uid: 73025,
|
||||
Gid: 5000,
|
||||
Size: 5,
|
||||
ModTime: time.Unix(1246508266, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "dsymonds",
|
||||
Gname: "eng",
|
||||
},
|
||||
contents: "Kilts",
|
||||
},
|
||||
{
|
||||
header: &Header{
|
||||
Name: "small2.txt",
|
||||
Mode: 0640,
|
||||
Uid: 73025,
|
||||
Gid: 5000,
|
||||
Size: 11,
|
||||
ModTime: time.Unix(1245217492, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "dsymonds",
|
||||
Gname: "eng",
|
||||
},
|
||||
contents: "Google.com\n",
|
||||
},
|
||||
{
|
||||
header: &Header{
|
||||
Name: "link.txt",
|
||||
Mode: 0777,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1314603082, 0),
|
||||
Typeflag: '2',
|
||||
Linkname: "small.txt",
|
||||
Uname: "strings",
|
||||
Gname: "strings",
|
||||
},
|
||||
// no contents
|
||||
},
|
||||
},
|
||||
},
|
||||
// The truncated test file was produced using these commands:
|
||||
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
|
||||
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
|
||||
{
|
||||
file: "testdata/writer-big.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: "tmp/16gig.txt",
|
||||
Mode: 0640,
|
||||
Uid: 73025,
|
||||
Gid: 5000,
|
||||
Size: 16 << 30,
|
||||
ModTime: time.Unix(1254699560, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "dsymonds",
|
||||
Gname: "eng",
|
||||
},
|
||||
// fake contents
|
||||
contents: strings.Repeat("\x00", 4<<10),
|
||||
},
|
||||
},
|
||||
},
|
||||
// The truncated test file was produced using these commands:
|
||||
// dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
|
||||
// tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
|
||||
{
|
||||
file: "testdata/writer-big-long.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: strings.Repeat("longname/", 15) + "16gig.txt",
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
Size: 16 << 30,
|
||||
ModTime: time.Unix(1399583047, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "guillaume",
|
||||
Gname: "guillaume",
|
||||
},
|
||||
// fake contents
|
||||
contents: strings.Repeat("\x00", 4<<10),
|
||||
},
|
||||
},
|
||||
},
|
||||
// This file was produced using gnu tar 1.17
|
||||
// gnutar -b 4 --format=ustar (longname/)*15 + file.txt
|
||||
{
|
||||
file: "testdata/ustar.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: strings.Repeat("longname/", 15) + "file.txt",
|
||||
Mode: 0644,
|
||||
Uid: 0765,
|
||||
Gid: 024,
|
||||
Size: 06,
|
||||
ModTime: time.Unix(1360135598, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "shane",
|
||||
Gname: "staff",
|
||||
},
|
||||
contents: "hello\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
// This file was produced using gnu tar 1.26
|
||||
// echo "Slartibartfast" > file.txt
|
||||
// ln file.txt hard.txt
|
||||
// tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
|
||||
{
|
||||
file: "testdata/hardlink.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: "file.txt",
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 100,
|
||||
Size: 15,
|
||||
ModTime: time.Unix(1425484303, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "vbatts",
|
||||
Gname: "users",
|
||||
},
|
||||
contents: "Slartibartfast\n",
|
||||
},
|
||||
{
|
||||
header: &Header{
|
||||
Name: "hard.txt",
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 100,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1425484303, 0),
|
||||
Typeflag: '1',
|
||||
Linkname: "file.txt",
|
||||
Uname: "vbatts",
|
||||
Gname: "users",
|
||||
},
|
||||
// no contents
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
|
||||
func bytestr(offset int, b []byte) string {
|
||||
const rowLen = 32
|
||||
s := fmt.Sprintf("%04x ", offset)
|
||||
for _, ch := range b {
|
||||
switch {
|
||||
case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
|
||||
s += fmt.Sprintf(" %c", ch)
|
||||
default:
|
||||
s += fmt.Sprintf(" %02x", ch)
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Render a pseudo-diff between two blocks of bytes.
|
||||
func bytediff(a []byte, b []byte) string {
|
||||
const rowLen = 32
|
||||
s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
|
||||
for offset := 0; len(a)+len(b) > 0; offset += rowLen {
|
||||
na, nb := rowLen, rowLen
|
||||
if na > len(a) {
|
||||
na = len(a)
|
||||
}
|
||||
if nb > len(b) {
|
||||
nb = len(b)
|
||||
}
|
||||
sa := bytestr(offset, a[0:na])
|
||||
sb := bytestr(offset, b[0:nb])
|
||||
if sa != sb {
|
||||
s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
|
||||
}
|
||||
a = a[na:]
|
||||
b = b[nb:]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func TestWriter(t *testing.T) {
|
||||
testLoop:
|
||||
for i, test := range writerTests {
|
||||
expected, err := ioutil.ReadFile(test.file)
|
||||
if err != nil {
|
||||
t.Errorf("test %d: Unexpected error: %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
|
||||
big := false
|
||||
for j, entry := range test.entries {
|
||||
big = big || entry.header.Size > 1<<10
|
||||
if err := tw.WriteHeader(entry.header); err != nil {
|
||||
t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
|
||||
continue testLoop
|
||||
}
|
||||
if _, err := io.WriteString(tw, entry.contents); err != nil {
|
||||
t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
|
||||
continue testLoop
|
||||
}
|
||||
}
|
||||
// Only interested in Close failures for the small tests.
|
||||
if err := tw.Close(); err != nil && !big {
|
||||
t.Errorf("test %d: Failed closing archive: %v", i, err)
|
||||
continue testLoop
|
||||
}
|
||||
|
||||
actual := buf.Bytes()
|
||||
if !bytes.Equal(expected, actual) {
|
||||
t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
|
||||
i, bytediff(expected, actual))
|
||||
}
|
||||
if testing.Short() { // The second test is expensive.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPax(t *testing.T) {
|
||||
// Create an archive with a large name
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat: %v", err)
|
||||
}
|
||||
// Force a PAX long name to be written
|
||||
longName := strings.Repeat("ab", 100)
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
hdr.Name = longName
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != longName {
|
||||
t.Fatal("Couldn't recover long file name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxSymlink(t *testing.T) {
|
||||
// Create an archive with a large linkname
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeSymlink
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
// Force a PAX long linkname to be written
|
||||
longLinkname := strings.Repeat("1234567890/1234567890", 10)
|
||||
hdr.Linkname = longLinkname
|
||||
|
||||
hdr.Size = 0
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Linkname != longLinkname {
|
||||
t.Fatal("Couldn't recover long link name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxNonAscii(t *testing.T) {
|
||||
// Create an archive with non-ASCII names. These should trigger a pax header
|
||||
// because pax headers have a defined utf-8 encoding.
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
|
||||
// some sample data
|
||||
chineseFilename := "文件名"
|
||||
chineseGroupname := "組"
|
||||
chineseUsername := "用戶名"
|
||||
|
||||
hdr.Name = chineseFilename
|
||||
hdr.Gname = chineseGroupname
|
||||
hdr.Uname = chineseUsername
|
||||
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != chineseFilename {
|
||||
t.Fatal("Couldn't recover unicode name")
|
||||
}
|
||||
if hdr.Gname != chineseGroupname {
|
||||
t.Fatal("Couldn't recover unicode group")
|
||||
}
|
||||
if hdr.Uname != chineseUsername {
|
||||
t.Fatal("Couldn't recover unicode user")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxXattrs(t *testing.T) {
|
||||
xattrs := map[string]string{
|
||||
"user.key": "value",
|
||||
}
|
||||
|
||||
// Create an archive with an xattr
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat: %v", err)
|
||||
}
|
||||
contents := "Kilts"
|
||||
hdr.Xattrs = xattrs
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Test that we can get the xattrs back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
|
||||
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
|
||||
hdr.Xattrs, xattrs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxHeadersSorted(t *testing.T) {
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat: %v", err)
|
||||
}
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
|
||||
hdr.Xattrs = map[string]string{
|
||||
"foo": "foo",
|
||||
"bar": "bar",
|
||||
"baz": "baz",
|
||||
"qux": "qux",
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
|
||||
// xattr bar should always appear before others
|
||||
indices := []int{
|
||||
bytes.Index(buf.Bytes(), []byte("bar=bar")),
|
||||
bytes.Index(buf.Bytes(), []byte("baz=baz")),
|
||||
bytes.Index(buf.Bytes(), []byte("foo=foo")),
|
||||
bytes.Index(buf.Bytes(), []byte("qux=qux")),
|
||||
}
|
||||
if !sort.IntsAreSorted(indices) {
|
||||
t.Fatal("PAX headers are not sorted")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUSTARLongName(t *testing.T) {
|
||||
// Create an archive with a path that failed to split with USTAR extension in previous versions.
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeDir
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
// Force a PAX long name to be written. The name was taken from a practical example
|
||||
// that fails; every character was replaced with a digit to anonymize the sample.
|
||||
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
|
||||
hdr.Name = longName
|
||||
|
||||
hdr.Size = 0
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != longName {
|
||||
t.Fatal("Couldn't recover long name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidTypeflagWithPAXHeader(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
tw := NewWriter(&buffer)
|
||||
|
||||
fileName := strings.Repeat("ab", 100)
|
||||
|
||||
hdr := &Header{
|
||||
Name: fileName,
|
||||
Size: 4,
|
||||
Typeflag: 0,
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
t.Fatalf("Failed to write header: %s", err)
|
||||
}
|
||||
if _, err := tw.Write([]byte("fooo")); err != nil {
|
||||
t.Fatalf("Failed to write the file's data: %s", err)
|
||||
}
|
||||
tw.Close()
|
||||
|
||||
tr := NewReader(&buffer)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read header: %s", err)
|
||||
}
|
||||
if header.Typeflag != 0 {
|
||||
t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteAfterClose(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
tw := NewWriter(&buffer)
|
||||
|
||||
hdr := &Header{
|
||||
Name: "small.txt",
|
||||
Size: 5,
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
t.Fatalf("Failed to write header: %s", err)
|
||||
}
|
||||
tw.Close()
|
||||
if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
|
||||
t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitUSTARPath(t *testing.T) {
|
||||
var sr = strings.Repeat
|
||||
|
||||
var vectors = []struct {
|
||||
input string // Input path
|
||||
prefix string // Expected output prefix
|
||||
suffix string // Expected output suffix
|
||||
ok bool // Split success?
|
||||
}{
|
||||
{"", "", "", false},
|
||||
{"abc", "", "", false},
|
||||
{"用戶名", "", "", false},
|
||||
{sr("a", fileNameSize), "", "", false},
|
||||
{sr("a", fileNameSize) + "/", "", "", false},
|
||||
{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
|
||||
{sr("a", fileNamePrefixSize) + "/", "", "", false},
|
||||
{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
|
||||
{sr("a", fileNameSize+1), "", "", false},
|
||||
{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
|
||||
{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
|
||||
sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
|
||||
{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
|
||||
{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
prefix, suffix, ok := splitUSTARPath(v.input)
|
||||
if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
|
||||
t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
|
||||
v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatPAXRecord(t *testing.T) {
|
||||
var medName = strings.Repeat("CD", 50)
|
||||
var longName = strings.Repeat("AB", 100)
|
||||
|
||||
var vectors = []struct {
|
||||
inputKey string
|
||||
inputVal string
|
||||
output string
|
||||
}{
|
||||
{"k", "v", "6 k=v\n"},
|
||||
{"path", "/etc/hosts", "19 path=/etc/hosts\n"},
|
||||
{"path", longName, "210 path=" + longName + "\n"},
|
||||
{"path", medName, "110 path=" + medName + "\n"},
|
||||
{"foo", "ba", "9 foo=ba\n"},
|
||||
{"foo", "bar", "11 foo=bar\n"},
|
||||
{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
|
||||
{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
|
||||
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
|
||||
{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
output := formatPAXRecord(v.inputKey, v.inputVal)
|
||||
if output != v.output {
|
||||
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
|
||||
v.inputKey, v.inputVal, output, v.output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFitsInBase256(t *testing.T) {
|
||||
var vectors = []struct {
|
||||
input int64
|
||||
width int
|
||||
ok bool
|
||||
}{
|
||||
{+1, 8, true},
|
||||
{0, 8, true},
|
||||
{-1, 8, true},
|
||||
{1 << 56, 8, false},
|
||||
{(1 << 56) - 1, 8, true},
|
||||
{-1 << 56, 8, true},
|
||||
{(-1 << 56) - 1, 8, false},
|
||||
{121654, 8, true},
|
||||
{-9849849, 8, true},
|
||||
{math.MaxInt64, 9, true},
|
||||
{0, 9, true},
|
||||
{math.MinInt64, 9, true},
|
||||
{math.MaxInt64, 12, true},
|
||||
{0, 12, true},
|
||||
{math.MinInt64, 12, true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
ok := fitsInBase256(v.width, v.input)
|
||||
if ok != v.ok {
|
||||
t.Errorf("checkNumeric(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatNumeric(t *testing.T) {
|
||||
var vectors = []struct {
|
||||
input int64
|
||||
output string
|
||||
ok bool
|
||||
}{
|
||||
// Test base-256 (binary) encoded values.
|
||||
{-1, "\xff", true},
|
||||
{-1, "\xff\xff", true},
|
||||
{-1, "\xff\xff\xff", true},
|
||||
{(1 << 0), "0", false},
|
||||
{(1 << 8) - 1, "\x80\xff", true},
|
||||
{(1 << 8), "0\x00", false},
|
||||
{(1 << 16) - 1, "\x80\xff\xff", true},
|
||||
{(1 << 16), "00\x00", false},
|
||||
{-1 * (1 << 0), "\xff", true},
|
||||
{-1*(1<<0) - 1, "0", false},
|
||||
{-1 * (1 << 8), "\xff\x00", true},
|
||||
{-1*(1<<8) - 1, "0\x00", false},
|
||||
{-1 * (1 << 16), "\xff\x00\x00", true},
|
||||
{-1*(1<<16) - 1, "00\x00", false},
|
||||
{537795476381659745, "0000000\x00", false},
|
||||
{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
|
||||
{-615126028225187231, "0000000\x00", false},
|
||||
{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
|
||||
{math.MaxInt64, "0000000\x00", false},
|
||||
{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
|
||||
{math.MinInt64, "0000000\x00", false},
|
||||
{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
|
||||
{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
|
||||
{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
var f formatter
|
||||
output := make([]byte, len(v.output))
|
||||
f.formatNumeric(output, v.input)
|
||||
ok := (f.err == nil)
|
||||
if ok != v.ok {
|
||||
if v.ok {
|
||||
t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input)
|
||||
} else {
|
||||
t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input)
|
||||
}
|
||||
}
|
||||
if string(output) != v.output {
|
||||
t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatPAXTime(t *testing.T) {
|
||||
t1 := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC)
|
||||
t2 := time.Date(2000, 1, 1, 11, 0, 0, 100, time.UTC)
|
||||
t3 := time.Date(1960, 1, 1, 11, 0, 0, 0, time.UTC)
|
||||
t4 := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
verify := func(time time.Time, s string) {
|
||||
p := formatPAXTime(time)
|
||||
if p != s {
|
||||
t.Errorf("for %v, expected %s, got %s", time, s, p)
|
||||
}
|
||||
}
|
||||
verify(t1, "946724400")
|
||||
verify(t2, "946724400.000000100")
|
||||
verify(t3, "-315579600")
|
||||
verify(t4, "0")
|
||||
}
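The tests above exercise the vendored tar writer end to end. For orientation only, here is a minimal sketch of the same Writer API; the file name and contents are invented for illustration and are not part of the vendored code.

```go
package main

import (
	"bytes"
	"log"

	"github.com/Microsoft/go-winio/archive/tar"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	// Write one small regular-file entry; the writer switches to PAX
	// records automatically when a field does not fit the ustar format.
	contents := []byte("hello\n")
	hdr := &tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(contents))}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(contents); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("archive is %d bytes", buf.Len())
}
```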
280
vendor/github.com/Microsoft/go-winio/backup.go
generated
vendored
Normal file
@@ -0,0 +1,280 @@
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
)
|
||||
|
||||
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||
|
||||
const (
|
||||
BackupData = uint32(iota + 1)
|
||||
BackupEaData
|
||||
BackupSecurity
|
||||
BackupAlternateData
|
||||
BackupLink
|
||||
BackupPropertyData
|
||||
BackupObjectId
|
||||
BackupReparseData
|
||||
BackupSparseBlock
|
||||
BackupTxfsData
|
||||
)
|
||||
|
||||
const (
|
||||
StreamSparseAttributes = uint32(8)
|
||||
)
|
||||
|
||||
const (
|
||||
WRITE_DAC = 0x40000
|
||||
WRITE_OWNER = 0x80000
|
||||
ACCESS_SYSTEM_SECURITY = 0x1000000
|
||||
)
|
||||
|
||||
// BackupHeader represents a backup stream of a file.
|
||||
type BackupHeader struct {
|
||||
Id uint32 // The backup stream ID
|
||||
Attributes uint32 // Stream attributes
|
||||
Size int64 // The size of the stream in bytes
|
||||
Name string // The name of the stream (for BackupAlternateData only).
|
||||
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
|
||||
}
|
||||
|
||||
type win32StreamId struct {
|
||||
StreamId uint32
|
||||
Attributes uint32
|
||||
Size uint64
|
||||
NameSize uint32
|
||||
}
|
||||
|
||||
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
|
||||
// of BackupHeader values.
|
||||
type BackupStreamReader struct {
|
||||
r io.Reader
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
|
||||
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
|
||||
return &BackupStreamReader{r, 0}
|
||||
}
|
||||
|
||||
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
|
||||
// it was not completely read.
|
||||
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
if r.bytesLeft > 0 {
|
||||
if s, ok := r.r.(io.Seeker); ok {
|
||||
// Check that the reader supports seeking (a zero-offset Seek succeeds)
// before attempting the real seek.
|
||||
if _, err := s.Seek(0, io.SeekCurrent); err == nil {
|
||||
if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.bytesLeft = 0
|
||||
}
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var wsi win32StreamId
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr := &BackupHeader{
|
||||
Id: wsi.StreamId,
|
||||
Attributes: wsi.Attributes,
|
||||
Size: int64(wsi.Size),
|
||||
}
|
||||
if wsi.NameSize != 0 {
|
||||
name := make([]uint16, int(wsi.NameSize/2))
|
||||
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Name = syscall.UTF16ToString(name)
|
||||
}
|
||||
if wsi.StreamId == BackupSparseBlock {
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Size -= 8
|
||||
}
|
||||
r.bytesLeft = hdr.Size
|
||||
return hdr, nil
|
||||
}
|
||||
|
||||
// Read reads from the current backup stream.
|
||||
func (r *BackupStreamReader) Read(b []byte) (int, error) {
|
||||
if r.bytesLeft == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if int64(len(b)) > r.bytesLeft {
|
||||
b = b[:r.bytesLeft]
|
||||
}
|
||||
n, err := r.r.Read(b)
|
||||
r.bytesLeft -= int64(n)
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
} else if r.bytesLeft == 0 && err == nil {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
|
||||
type BackupStreamWriter struct {
|
||||
w io.Writer
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
|
||||
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
|
||||
return &BackupStreamWriter{w, 0}
|
||||
}
|
||||
|
||||
// WriteHeader writes the next backup stream header and prepares for calls to Write().
|
||||
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
|
||||
if w.bytesLeft != 0 {
|
||||
return fmt.Errorf("missing %d bytes", w.bytesLeft)
|
||||
}
|
||||
name := utf16.Encode([]rune(hdr.Name))
|
||||
wsi := win32StreamId{
|
||||
StreamId: hdr.Id,
|
||||
Attributes: hdr.Attributes,
|
||||
Size: uint64(hdr.Size),
|
||||
NameSize: uint32(len(name) * 2),
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
// Include space for the int64 block offset
|
||||
wsi.Size += 8
|
||||
}
|
||||
if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(name) != 0 {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.bytesLeft = hdr.Size
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes to the current backup stream.
|
||||
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
|
||||
if w.bytesLeft < int64(len(b)) {
|
||||
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
|
||||
}
|
||||
n, err := w.w.Write(b)
|
||||
w.bytesLeft -= int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
|
||||
type BackupFileReader struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
|
||||
// Read will attempt to read the security descriptor of the file.
|
||||
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
|
||||
r := &BackupFileReader{f, includeSecurity, 0}
|
||||
return r
|
||||
}
|
||||
|
||||
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
|
||||
func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
var bytesRead uint32
|
||||
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(r.f)
|
||||
if bytesRead == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(bytesRead), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileReader. It does not close
|
||||
// the underlying file.
|
||||
func (r *BackupFileReader) Close() error {
|
||||
if r.ctx != 0 {
|
||||
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
runtime.KeepAlive(r.f)
|
||||
r.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
|
||||
type BackupFileWriter struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
|
||||
// Write() will attempt to restore the security descriptor from the stream.
|
||||
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
|
||||
w := &BackupFileWriter{f, includeSecurity, 0}
|
||||
return w
|
||||
}
|
||||
|
||||
// Write restores a portion of the file using the provided backup stream.
|
||||
func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
var bytesWritten uint32
|
||||
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(w.f)
|
||||
if int(bytesWritten) != len(b) {
|
||||
return int(bytesWritten), errors.New("not all bytes could be written")
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileWriter. It does not
|
||||
// close the underlying file.
|
||||
func (w *BackupFileWriter) Close() error {
|
||||
if w.ctx != 0 {
|
||||
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
runtime.KeepAlive(w.f)
|
||||
w.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
|
||||
// or restore privileges have been acquired.
|
||||
//
|
||||
// If the file opened was a directory, it cannot be used with Readdir().
|
||||
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
|
||||
winPath, err := syscall.UTF16FromString(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "open", Path: path, Err: err}
|
||||
return nil, err
|
||||
}
|
||||
return os.NewFile(uintptr(h), path), nil
|
||||
}
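A minimal sketch of how the pieces above fit together on the read side; the path is a placeholder and error handling is abbreviated. Only APIs defined in this file are used.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"syscall"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Open with backup semantics, then enumerate the file's backup streams.
	f, err := winio.OpenForBackup(`C:\temp\example.txt`, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r := winio.NewBackupFileReader(f, false) // false: skip the security descriptor
	defer r.Close()

	br := winio.NewBackupStreamReader(r)
	for {
		hdr, err := br.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}
```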
255
vendor/github.com/Microsoft/go-winio/backup_test.go
generated
vendored
Normal file
@@ -0,0 +1,255 @@
package winio
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testFileName string
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
f, err := ioutil.TempFile("", "tmp")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
testFileName = f.Name()
|
||||
f.Close()
|
||||
defer os.Remove(testFileName)
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func makeTestFile(makeADS bool) error {
|
||||
os.Remove(testFileName)
|
||||
f, err := os.Create(testFileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = f.Write([]byte("testing 1 2 3\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if makeADS {
|
||||
a, err := os.Create(testFileName + ":ads.txt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer a.Close()
|
||||
_, err = a.Write([]byte("alternate data stream\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestBackupRead(t *testing.T) {
|
||||
err := makeTestFile(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewBackupFileReader(f, false)
|
||||
defer r.Close()
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(b) == 0 {
|
||||
t.Fatal("no data")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupStreamRead(t *testing.T) {
|
||||
err := makeTestFile(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewBackupFileReader(f, false)
|
||||
defer r.Close()
|
||||
|
||||
br := NewBackupStreamReader(r)
|
||||
gotData := false
|
||||
gotAltData := false
|
||||
for {
|
||||
hdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
switch hdr.Id {
|
||||
case BackupData:
|
||||
if gotData {
|
||||
t.Fatal("duplicate data")
|
||||
}
|
||||
if hdr.Name != "" {
|
||||
t.Fatalf("unexpected name %s", hdr.Name)
|
||||
}
|
||||
b, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != "testing 1 2 3\n" {
|
||||
t.Fatalf("incorrect data %v", b)
|
||||
}
|
||||
gotData = true
|
||||
case BackupAlternateData:
|
||||
if gotAltData {
|
||||
t.Fatal("duplicate alt data")
|
||||
}
|
||||
if hdr.Name != ":ads.txt:$DATA" {
|
||||
t.Fatalf("incorrect name %s", hdr.Name)
|
||||
}
|
||||
b, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != "alternate data stream\n" {
|
||||
t.Fatalf("incorrect data %v", b)
|
||||
}
|
||||
gotAltData = true
|
||||
default:
|
||||
t.Fatalf("unknown stream ID %d", hdr.Id)
|
||||
}
|
||||
}
|
||||
if !gotData || !gotAltData {
|
||||
t.Fatal("missing stream")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupStreamWrite(t *testing.T) {
|
||||
f, err := os.Create(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
w := NewBackupFileWriter(f, false)
|
||||
defer w.Close()
|
||||
|
||||
data := "testing 1 2 3\n"
|
||||
altData := "alternate stream\n"
|
||||
|
||||
br := NewBackupStreamWriter(w)
|
||||
err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := br.Write([]byte(data))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != len(data) {
|
||||
t.Fatal("short write")
|
||||
}
|
||||
|
||||
err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = br.Write([]byte(altData))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != len(altData) {
|
||||
t.Fatal("short write")
|
||||
}
|
||||
|
||||
f.Close()
|
||||
|
||||
b, err := ioutil.ReadFile(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != data {
|
||||
t.Fatalf("wrong data %v", b)
|
||||
}
|
||||
|
||||
b, err = ioutil.ReadFile(testFileName + ":ads.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != altData {
|
||||
t.Fatalf("wrong data %v", b)
|
||||
}
|
||||
}
|
||||
|
||||
func makeSparseFile() error {
|
||||
os.Remove(testFileName)
|
||||
f, err := os.Create(testFileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
const (
|
||||
FSCTL_SET_SPARSE = 0x000900c4
|
||||
FSCTL_SET_ZERO_DATA = 0x000980c8
|
||||
)
|
||||
|
||||
err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Write([]byte("testing 1 2 3\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Seek(1000000, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Write([]byte("more data later\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestBackupSparseFile(t *testing.T) {
|
||||
err := makeSparseFile()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewBackupFileReader(f, false)
|
||||
defer r.Close()
|
||||
|
||||
br := NewBackupStreamReader(r)
|
||||
for {
|
||||
hdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Log(hdr)
|
||||
}
|
||||
}
4
vendor/github.com/Microsoft/go-winio/backuptar/noop.go
generated
vendored
Normal file
@@ -0,0 +1,4 @@
// +build !windows
// This file only exists to allow go get on non-Windows platforms.

package backuptar
439
vendor/github.com/Microsoft/go-winio/backuptar/tar.go
generated
vendored
Normal file
@@ -0,0 +1,439 @@
// +build windows
|
||||
|
||||
package backuptar
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
|
||||
)
|
||||
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
const (
|
||||
hdrFileAttributes = "fileattr"
|
||||
hdrSecurityDescriptor = "sd"
|
||||
hdrRawSecurityDescriptor = "rawsd"
|
||||
hdrMountPoint = "mountpoint"
|
||||
hdrEaPrefix = "xattr."
|
||||
)
|
||||
|
||||
func writeZeroes(w io.Writer, count int64) error {
|
||||
buf := make([]byte, 8192)
|
||||
c := len(buf)
|
||||
for i := int64(0); i < count; i += int64(c) {
|
||||
if int64(c) > count-i {
|
||||
c = int(count - i)
|
||||
}
|
||||
_, err := w.Write(buf[:c])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
|
||||
curOffset := int64(0)
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bhdr.Id != winio.BackupSparseBlock {
|
||||
return fmt.Errorf("unexpected stream %d", bhdr.Id)
|
||||
}
|
||||
|
||||
// archive/tar does not support writing sparse files
|
||||
// so just write zeroes to catch up to the current offset.
|
||||
err = writeZeroes(t, bhdr.Offset-curOffset)
if err != nil {
return err
}
|
||||
if bhdr.Size == 0 {
|
||||
break
|
||||
}
|
||||
n, err := io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
curOffset = bhdr.Offset + n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BasicInfoHeader creates a tar header from basic file information.
|
||||
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
|
||||
hdr := &tar.Header{
|
||||
Name: filepath.ToSlash(name),
|
||||
Size: size,
|
||||
Typeflag: tar.TypeReg,
|
||||
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
|
||||
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
|
||||
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
|
||||
CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
|
||||
Winheaders: make(map[string]string),
|
||||
}
|
||||
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
|
||||
|
||||
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
|
||||
hdr.Mode |= c_ISDIR
|
||||
hdr.Size = 0
|
||||
hdr.Typeflag = tar.TypeDir
|
||||
}
|
||||
return hdr
|
||||
}
|
||||
|
||||
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
|
||||
//
|
||||
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
|
||||
//
|
||||
// The additional Win32 metadata is:
|
||||
//
|
||||
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
|
||||
//
|
||||
// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
|
||||
//
|
||||
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
|
||||
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
|
||||
name = filepath.ToSlash(name)
|
||||
hdr := BasicInfoHeader(name, size, fileInfo)
|
||||
|
||||
// If r can be seeked, then this function is two-pass: pass 1 collects the
|
||||
// tar header data, and pass 2 copies the data stream. If r cannot be
|
||||
// seeked, then some header data (in particular EAs) will be silently lost.
|
||||
var (
|
||||
restartPos int64
|
||||
err error
|
||||
)
|
||||
sr, readTwice := r.(io.Seeker)
|
||||
if readTwice {
|
||||
if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
|
||||
readTwice = false
|
||||
}
|
||||
}
|
||||
|
||||
br := winio.NewBackupStreamReader(r)
|
||||
var dataHdr *winio.BackupHeader
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupData:
|
||||
hdr.Mode |= c_ISREG
|
||||
if !readTwice {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
case winio.BackupSecurity:
|
||||
sd, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
|
||||
|
||||
case winio.BackupReparseData:
|
||||
hdr.Mode |= c_ISLNK
|
||||
hdr.Typeflag = tar.TypeSymlink
|
||||
reparseBuffer, err := ioutil.ReadAll(br)
if err != nil {
return err
}
|
||||
rp, err := winio.DecodeReparsePoint(reparseBuffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rp.IsMountPoint {
|
||||
hdr.Winheaders[hdrMountPoint] = "1"
|
||||
}
|
||||
hdr.Linkname = rp.Target
|
||||
|
||||
case winio.BackupEaData:
|
||||
eab, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eas, err := winio.DecodeExtendedAttributes(eab)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ea := range eas {
|
||||
// Use base64 encoding for the binary value. Note that there
|
||||
// is no way to encode the EA's flags, since their use doesn't
|
||||
// make any sense for persisted EAs.
|
||||
hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
|
||||
}
|
||||
|
||||
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
|
||||
}
|
||||
}
|
||||
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if readTwice {
|
||||
// Get back to the data stream.
|
||||
if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bhdr.Id == winio.BackupData {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if dataHdr != nil {
|
||||
// A data stream was found. Copy the data.
|
||||
if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
if size != dataHdr.Size {
|
||||
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = copySparse(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Look for streams after the data stream. The only ones we handle are alternate data streams.
|
||||
// Other streams may have metadata that could be serialized, but the tar header has already
|
||||
// been written. In practice, this means that we don't get EA or TXF metadata.
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupAlternateData:
|
||||
altName := bhdr.Name
|
||||
if strings.HasSuffix(altName, ":$DATA") {
|
||||
altName = altName[:len(altName)-len(":$DATA")]
|
||||
}
|
||||
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
hdr = &tar.Header{
|
||||
Name: name + altName,
|
||||
Mode: hdr.Mode,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: bhdr.Size,
|
||||
ModTime: hdr.ModTime,
|
||||
AccessTime: hdr.AccessTime,
|
||||
ChangeTime: hdr.ChangeTime,
|
||||
}
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
} else {
|
||||
// Unsupported for now, since the size of the alternate stream is not present
|
||||
// in the backup stream until after the data has been read.
|
||||
return errors.New("tar of sparse alternate data streams is unsupported")
|
||||
}
|
||||
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
|
||||
// WriteTarFileFromBackupStream.
|
||||
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
|
||||
name = hdr.Name
|
||||
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
|
||||
size = hdr.Size
|
||||
}
|
||||
fileInfo = &winio.FileBasicInfo{
|
||||
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
|
||||
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
|
||||
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
|
||||
CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
|
||||
}
|
||||
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
|
||||
attr, err := strconv.ParseUint(attrStr, 10, 32)
|
||||
if err != nil {
|
||||
return "", 0, nil, err
|
||||
}
|
||||
fileInfo.FileAttributes = uint32(attr)
|
||||
} else {
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
|
||||
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
|
||||
// tar file that was not processed, or io.EOF if there are no more.
|
||||
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
|
||||
bw := winio.NewBackupStreamWriter(w)
|
||||
var sd []byte
|
||||
var err error
|
||||
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
|
||||
// by this library will have raw binary for the security descriptor.
|
||||
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
|
||||
sd, err = winio.SddlToSecurityDescriptor(sddl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
|
||||
sd, err = base64.StdEncoding.DecodeString(sdraw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if len(sd) != 0 {
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupSecurity,
|
||||
Size: int64(len(sd)),
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(sd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var eas []winio.ExtendedAttribute
|
||||
for k, v := range hdr.Winheaders {
|
||||
if !strings.HasPrefix(k, hdrEaPrefix) {
|
||||
continue
|
||||
}
|
||||
data, err := base64.StdEncoding.DecodeString(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eas = append(eas, winio.ExtendedAttribute{
|
||||
Name: k[len(hdrEaPrefix):],
|
||||
Value: data,
|
||||
})
|
||||
}
|
||||
if len(eas) != 0 {
|
||||
eadata, err := winio.EncodeExtendedAttributes(eas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupEaData,
|
||||
Size: int64(len(eadata)),
|
||||
}
|
||||
err = bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(eadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeSymlink {
|
||||
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
|
||||
rp := winio.ReparsePoint{
|
||||
Target: filepath.FromSlash(hdr.Linkname),
|
||||
IsMountPoint: isMountPoint,
|
||||
}
|
||||
reparse := winio.EncodeReparsePoint(&rp)
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupReparseData,
|
||||
Size: int64(len(reparse)),
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(reparse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupData,
|
||||
Size: hdr.Size,
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = io.Copy(bw, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Copy all the alternate data streams and return the next non-ADS header.
|
||||
for {
|
||||
ahdr, err := t.Next()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
|
||||
return ahdr, nil
|
||||
}
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupAlternateData,
|
||||
Size: ahdr.Size,
|
||||
Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
|
||||
}
|
||||
err = bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = io.Copy(bw, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
84
vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
package backuptar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/go-winio/archive/tar"
|
||||
)
|
||||
|
||||
func ensurePresent(t *testing.T, m map[string]string, keys ...string) {
|
||||
for _, k := range keys {
|
||||
if _, ok := m[k]; !ok {
|
||||
t.Error(k, "not present in tar header")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundTrip(t *testing.T) {
|
||||
f, err := ioutil.TempFile("", "tst")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
if _, err = f.Write([]byte("testing 1 2 3\n")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err = f.Seek(0, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bi, err := winio.GetFileBasicInfo(f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
br := winio.NewBackupFileReader(f, true)
|
||||
defer br.Close()
|
||||
|
||||
var buf bytes.Buffer
|
||||
tw := tar.NewWriter(&buf)
|
||||
|
||||
err = WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tr := tar.NewReader(&buf)
|
||||
hdr, err := tr.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
name, size, bi2, err := FileInfoFromHeader(hdr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if name != filepath.ToSlash(f.Name()) {
|
||||
t.Errorf("got name %s, expected %s", name, filepath.ToSlash(f.Name()))
|
||||
}
|
||||
|
||||
if size != fi.Size() {
|
||||
t.Errorf("got size %d, expected %d", size, fi.Size())
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(*bi, *bi2) {
|
||||
t.Errorf("got %#v, expected %#v", *bi, *bi2)
|
||||
}
|
||||
|
||||
ensurePresent(t, hdr.Winheaders, "fileattr", "rawsd")
|
||||
}
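TestRoundTrip above covers the capture direction (file to tar). A hedged sketch of the opposite direction follows; the destination-path handling is simplified, and winio.SetFileBasicInfo is assumed from go-winio's file-info helpers, which are not part of this diff.

```go
package restore

import (
	"syscall"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/archive/tar"
	"github.com/Microsoft/go-winio/backuptar"
)

// restoreEntry writes one tar entry (plus any alternate data streams that
// follow it) back to destPath and returns the next unrelated tar header.
func restoreEntry(tr *tar.Reader, hdr *tar.Header, destPath string) (*tar.Header, error) {
	_, _, info, err := backuptar.FileInfoFromHeader(hdr)
	if err != nil {
		return nil, err
	}

	f, err := winio.OpenForBackup(destPath, syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER, 0, syscall.CREATE_ALWAYS)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// Assumed helper from go-winio (not shown in this diff): applies the
	// timestamps and attributes recovered from the tar header.
	if err := winio.SetFileBasicInfo(f, info); err != nil {
		return nil, err
	}

	w := winio.NewBackupFileWriter(f, true) // true: also restore the security descriptor
	defer w.Close()

	return backuptar.WriteBackupStreamFromTarFile(w, tr, hdr)
}
```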
137
vendor/github.com/Microsoft/go-winio/ea.go
generated
vendored
Normal file
@@ -0,0 +1,137 @@
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
type fileFullEaInformation struct {
|
||||
NextEntryOffset uint32
|
||||
Flags uint8
|
||||
NameLength uint8
|
||||
ValueLength uint16
|
||||
}
|
||||
|
||||
var (
|
||||
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
|
||||
|
||||
errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
|
||||
errEaNameTooLarge = errors.New("extended attribute name too large")
|
||||
errEaValueTooLarge = errors.New("extended attribute value too large")
|
||||
)
|
||||
|
||||
// ExtendedAttribute represents a single Windows EA.
|
||||
type ExtendedAttribute struct {
|
||||
Name string
|
||||
Value []byte
|
||||
Flags uint8
|
||||
}
|
||||
|
||||
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
|
||||
var info fileFullEaInformation
|
||||
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
nameOffset := fileFullEaInformationSize
|
||||
nameLen := int(info.NameLength)
|
||||
valueOffset := nameOffset + int(info.NameLength) + 1
|
||||
valueLen := int(info.ValueLength)
|
||||
nextOffset := int(info.NextEntryOffset)
|
||||
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
ea.Name = string(b[nameOffset : nameOffset+nameLen])
|
||||
ea.Value = b[valueOffset : valueOffset+valueLen]
|
||||
ea.Flags = info.Flags
|
||||
if info.NextEntryOffset != 0 {
|
||||
nb = b[info.NextEntryOffset:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
|
||||
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
|
||||
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
|
||||
for len(b) != 0 {
|
||||
ea, nb, err := parseEa(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
eas = append(eas, ea)
|
||||
b = nb
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
|
||||
if int(uint8(len(ea.Name))) != len(ea.Name) {
|
||||
return errEaNameTooLarge
|
||||
}
|
||||
if int(uint16(len(ea.Value))) != len(ea.Value) {
|
||||
return errEaValueTooLarge
|
||||
}
|
||||
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
|
||||
withPadding := (entrySize + 3) &^ 3
|
||||
nextOffset := uint32(0)
|
||||
if !last {
|
||||
nextOffset = withPadding
|
||||
}
|
||||
info := fileFullEaInformation{
|
||||
NextEntryOffset: nextOffset,
|
||||
Flags: ea.Flags,
|
||||
NameLength: uint8(len(ea.Name)),
|
||||
ValueLength: uint16(len(ea.Value)),
|
||||
}
|
||||
|
||||
err := binary.Write(buf, binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte(ea.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = buf.WriteByte(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write(ea.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
|
||||
// buffer for use with BackupWrite, ZwSetEaFile, etc.
|
||||
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
for i := range eas {
|
||||
last := false
|
||||
if i == len(eas)-1 {
|
||||
last = true
|
||||
}
|
||||
|
||||
err := writeEa(&buf, &eas[i], last)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
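For a quick feel of the encode/decode pair above, here is a tiny round-trip sketch; the attribute names and values are invented.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Encode two EAs into a FILE_FULL_EA_INFORMATION buffer and decode them back.
	eas := []winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello")},
		{Name: "user.origin", Value: []byte("example")},
	}
	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		log.Fatal(err)
	}
	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		log.Fatal(err)
	}
	for _, ea := range decoded {
		fmt.Printf("%s = %q\n", ea.Name, ea.Value)
	}
}
```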
89
vendor/github.com/Microsoft/go-winio/ea_test.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
package winio
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"syscall"
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
testEas = []ExtendedAttribute{
|
||||
{Name: "foo", Value: []byte("bar")},
|
||||
{Name: "fizz", Value: []byte("buzz")},
|
||||
}
|
||||
|
||||
testEasEncoded = []byte{16, 0, 0, 0, 0, 3, 3, 0, 102, 111, 111, 0, 98, 97, 114, 0, 0, 0, 0, 0, 0, 4, 4, 0, 102, 105, 122, 122, 0, 98, 117, 122, 122, 0, 0, 0}
|
||||
testEasNotPadded = testEasEncoded[0 : len(testEasEncoded)-3]
|
||||
testEasTruncated = testEasEncoded[0:20]
|
||||
)
|
||||
|
||||
func Test_RoundTripEas(t *testing.T) {
|
||||
b, err := EncodeExtendedAttributes(testEas)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(testEasEncoded, b) {
|
||||
t.Fatalf("encoded mismatch %v %v", testEasEncoded, b)
|
||||
}
|
||||
eas, err := DecodeExtendedAttributes(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(testEas, eas) {
|
||||
t.Fatalf("mismatch %+v %+v", testEas, eas)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_EasDontNeedPaddingAtEnd(t *testing.T) {
|
||||
eas, err := DecodeExtendedAttributes(testEasNotPadded)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(testEas, eas) {
|
||||
t.Fatalf("mismatch %+v %+v", testEas, eas)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_TruncatedEasFailCorrectly(t *testing.T) {
|
||||
_, err := DecodeExtendedAttributes(testEasTruncated)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_NilEasEncodeAndDecodeAsNil(t *testing.T) {
|
||||
b, err := EncodeExtendedAttributes(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(b) != 0 {
|
||||
t.Fatal("expected empty")
|
||||
}
|
||||
eas, err := DecodeExtendedAttributes(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(eas) != 0 {
|
||||
t.Fatal("expected empty")
|
||||
}
|
||||
}
|
||||
|
||||
// Test_SetFileEa makes sure that the test buffer is actually parsable by NtSetEaFile.
|
||||
func Test_SetFileEa(t *testing.T) {
|
||||
f, err := ioutil.TempFile("", "winio")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
defer f.Close()
|
||||
ntdll := syscall.MustLoadDLL("ntdll.dll")
|
||||
ntSetEaFile := ntdll.MustFindProc("NtSetEaFile")
|
||||
var iosb [2]uintptr
|
||||
r, _, _ := ntSetEaFile.Call(f.Fd(), uintptr(unsafe.Pointer(&iosb[0])), uintptr(unsafe.Pointer(&testEasEncoded[0])), uintptr(len(testEasEncoded)))
|
||||
if r != 0 {
|
||||
t.Fatalf("NtSetEaFile failed with %08x", r)
|
||||
}
|
||||
}
|
||||
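Reading of the fixture above (not part of the vendored diff): the first entry of testEasEncoded illustrates the layout writeEa produces, assuming the 8-byte fixed header implied by the field types used in writeEa (uint32 + uint8 + uint8 + uint16).

// Entry 1 ("foo" = "bar"), 15 bytes of payload padded to 16:
//   16 0 0 0        NextEntryOffset = 16 (uint32, little endian)
//   0               Flags
//   3               NameLength = len("foo")
//   3 0             ValueLength = len("bar") (uint16, little endian)
//   102 111 111 0   "foo" plus the null terminator
//   98 97 114       "bar"
//   0               padding up to the next 4-byte boundary
// Entry 2 ("fizz" = "buzz") follows at offset 16 with NextEntryOffset = 0;
// its trailing padding is optional, which is why testEasNotPadded can drop
// the final three bytes and still decode to the same EAs.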
307	vendor/github.com/Microsoft/go-winio/file.go	(generated, vendored, new file)
@@ -0,0 +1,307 @@
// +build windows

package winio

import (
	"errors"
	"io"
	"runtime"
	"sync"
	"sync/atomic"
	"syscall"
	"time"
)

//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes

type atomicBool int32

func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setFalse()   { atomic.StoreInt32((*int32)(b), 0) }
func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }
func (b *atomicBool) swap(new bool) bool {
	var newInt int32
	if new {
		newInt = 1
	}
	return atomic.SwapInt32((*int32)(b), newInt) == 1
}

const (
	cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
	cFILE_SKIP_SET_EVENT_ON_HANDLE        = 2
)

var (
	ErrFileClosed = errors.New("file has already been closed")
	ErrTimeout    = &timeoutError{}
)

type timeoutError struct{}

func (e *timeoutError) Error() string   { return "i/o timeout" }
func (e *timeoutError) Timeout() bool   { return true }
func (e *timeoutError) Temporary() bool { return true }

type timeoutChan chan struct{}

var ioInitOnce sync.Once
var ioCompletionPort syscall.Handle

// ioResult contains the result of an asynchronous IO operation
type ioResult struct {
	bytes uint32
	err   error
}

// ioOperation represents an outstanding asynchronous Win32 IO
type ioOperation struct {
	o  syscall.Overlapped
	ch chan ioResult
}

func initIo() {
	h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
	if err != nil {
		panic(err)
	}
	ioCompletionPort = h
	go ioCompletionProcessor(h)
}

// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
	handle        syscall.Handle
	wg            sync.WaitGroup
	wgLock        sync.RWMutex
	closing       atomicBool
	readDeadline  deadlineHandler
	writeDeadline deadlineHandler
}

type deadlineHandler struct {
	setLock     sync.Mutex
	channel     timeoutChan
	channelLock sync.RWMutex
	timer       *time.Timer
	timedout    atomicBool
}

// makeWin32File makes a new win32File from an existing file handle
func makeWin32File(h syscall.Handle) (*win32File, error) {
	f := &win32File{handle: h}
	ioInitOnce.Do(initIo)
	_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
	if err != nil {
		return nil, err
	}
	err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
	if err != nil {
		return nil, err
	}
	f.readDeadline.channel = make(timeoutChan)
	f.writeDeadline.channel = make(timeoutChan)
	return f, nil
}

func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
	return makeWin32File(h)
}

// closeHandle closes the resources associated with a Win32 handle
func (f *win32File) closeHandle() {
	f.wgLock.Lock()
	// Atomically set that we are closing, releasing the resources only once.
	if !f.closing.swap(true) {
		f.wgLock.Unlock()
		// cancel all IO and wait for it to complete
		cancelIoEx(f.handle, nil)
		f.wg.Wait()
		// at this point, no new IO can start
		syscall.Close(f.handle)
		f.handle = 0
	} else {
		f.wgLock.Unlock()
	}
}

// Close closes a win32File.
func (f *win32File) Close() error {
	f.closeHandle()
	return nil
}

// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
	f.wgLock.RLock()
	if f.closing.isSet() {
		f.wgLock.RUnlock()
		return nil, ErrFileClosed
	}
	f.wg.Add(1)
	f.wgLock.RUnlock()
	c := &ioOperation{}
	c.ch = make(chan ioResult)
	return c, nil
}

// ioCompletionProcessor processes completed async IOs forever
func ioCompletionProcessor(h syscall.Handle) {
	for {
		var bytes uint32
		var key uintptr
		var op *ioOperation
		err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
		if op == nil {
			panic(err)
		}
		op.ch <- ioResult{bytes, err}
	}
}

// asyncIo processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
	if err != syscall.ERROR_IO_PENDING {
		return int(bytes), err
	}

	if f.closing.isSet() {
		cancelIoEx(f.handle, &c.o)
	}

	var timeout timeoutChan
	if d != nil {
		d.channelLock.Lock()
		timeout = d.channel
		d.channelLock.Unlock()
	}

	var r ioResult
	select {
	case r = <-c.ch:
		err = r.err
		if err == syscall.ERROR_OPERATION_ABORTED {
			if f.closing.isSet() {
				err = ErrFileClosed
			}
		}
	case <-timeout:
		cancelIoEx(f.handle, &c.o)
		r = <-c.ch
		err = r.err
		if err == syscall.ERROR_OPERATION_ABORTED {
			err = ErrTimeout
		}
	}

	// runtime.KeepAlive is needed, as c is passed via native
	// code to ioCompletionProcessor, c must remain alive
	// until the channel read is complete.
	runtime.KeepAlive(c)
	return int(r.bytes), err
}

// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
	c, err := f.prepareIo()
	if err != nil {
		return 0, err
	}
	defer f.wg.Done()

	if f.readDeadline.timedout.isSet() {
		return 0, ErrTimeout
	}

	var bytes uint32
	err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
	n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
	runtime.KeepAlive(b)

	// Handle EOF conditions.
	if err == nil && n == 0 && len(b) != 0 {
		return 0, io.EOF
	} else if err == syscall.ERROR_BROKEN_PIPE {
		return 0, io.EOF
	} else {
		return n, err
	}
}

// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
	c, err := f.prepareIo()
	if err != nil {
		return 0, err
	}
	defer f.wg.Done()

	if f.writeDeadline.timedout.isSet() {
		return 0, ErrTimeout
	}

	var bytes uint32
	err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
	n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
	runtime.KeepAlive(b)
	return n, err
}

func (f *win32File) SetReadDeadline(deadline time.Time) error {
	return f.readDeadline.set(deadline)
}

func (f *win32File) SetWriteDeadline(deadline time.Time) error {
	return f.writeDeadline.set(deadline)
}

func (f *win32File) Flush() error {
	return syscall.FlushFileBuffers(f.handle)
}

func (d *deadlineHandler) set(deadline time.Time) error {
	d.setLock.Lock()
	defer d.setLock.Unlock()

	if d.timer != nil {
		if !d.timer.Stop() {
			<-d.channel
		}
		d.timer = nil
	}
	d.timedout.setFalse()

	select {
	case <-d.channel:
		d.channelLock.Lock()
		d.channel = make(chan struct{})
		d.channelLock.Unlock()
	default:
	}

	if deadline.IsZero() {
		return nil
	}

	timeoutIO := func() {
		d.timedout.setTrue()
		close(d.channel)
	}

	now := time.Now()
	duration := deadline.Sub(now)
	if deadline.After(now) {
		// Deadline is in the future, set a timer to wait
		d.timer = time.AfterFunc(duration, timeoutIO)
	} else {
		// Deadline is in the past. Cancel all pending IO now.
		timeoutIO()
	}
	return nil
}
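For orientation (not part of the vendored diff): a minimal sketch of the exported MakeOpenFile entry point, assuming a Windows build and a hypothetical pipe path. The handle must be opened with FILE_FLAG_OVERLAPPED for the completion-port machinery above to deliver completions, and MakeOpenFile takes ownership of the handle.

// Hypothetical usage sketch, not part of the vendored code.
package main

import (
	"fmt"
	"syscall"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// The pipe name is an assumption for illustration only.
	name, _ := syscall.UTF16PtrFromString(`\\.\pipe\example`)
	h, err := syscall.CreateFile(name, syscall.GENERIC_READ, 0, nil,
		syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED, 0)
	if err != nil {
		panic(err)
	}

	f, err := winio.MakeOpenFile(h) // wrapper now owns and will close h
	if err != nil {
		syscall.CloseHandle(h)
		panic(err)
	}
	defer f.Close()

	// MakeOpenFile returns io.ReadWriteCloser, so reach the deadline method
	// through a type assertion; reads past the deadline fail with ErrTimeout.
	if d, ok := f.(interface{ SetReadDeadline(time.Time) error }); ok {
		_ = d.SetReadDeadline(time.Now().Add(2 * time.Second))
	}

	buf := make([]byte, 512)
	n, err := f.Read(buf)
	fmt.Println(n, err)
}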
61	vendor/github.com/Microsoft/go-winio/fileinfo.go	(generated, vendored, new file)
@@ -0,0 +1,61 @@
// +build windows

package winio

import (
	"os"
	"runtime"
	"syscall"
	"unsafe"
)

//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle

const (
	fileBasicInfo = 0
	fileIDInfo    = 0x12
)

// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
	FileAttributes                                          uint32
	pad                                                     uint32 // padding
}

// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
	bi := &FileBasicInfo{}
	if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	runtime.KeepAlive(f)
	return bi, nil
}

// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
	if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
	}
	runtime.KeepAlive(f)
	return nil
}

// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
// unique on a system.
type FileIDInfo struct {
	VolumeSerialNumber uint64
	FileID             [16]byte
}

// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
	fileID := &FileIDInfo{}
	if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	runtime.KeepAlive(f)
	return fileID, nil
}
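For orientation (not part of the vendored diff): a minimal sketch, assuming a Windows build and an example path, that reads a file's attributes with GetFileBasicInfo and compares the (volume serial, file ID) pair from GetFileID across two handles to detect that they reference the same underlying file.

// Hypothetical usage sketch, not part of the vendored code.
package main

import (
	"fmt"
	"os"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Example path chosen for illustration only.
	f, err := os.Open(`C:\Windows\notepad.exe`)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("attributes: %#x\n", bi.FileAttributes)

	id1, err := winio.GetFileID(f)
	if err != nil {
		panic(err)
	}

	g, err := os.Open(`C:\Windows\notepad.exe`)
	if err != nil {
		panic(err)
	}
	defer g.Close()
	id2, err := winio.GetFileID(g)
	if err != nil {
		panic(err)
	}

	// Same volume serial number and 128-bit file ID means the same file.
	fmt.Println(*id1 == *id2)
}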
15	vendor/github.com/Microsoft/go-winio/internal/etw/etw.go	(generated, vendored, new file)
@@ -0,0 +1,15 @@
// Package etw provides support for TraceLogging-based ETW (Event Tracing
// for Windows). TraceLogging is a format of ETW events that are self-describing
// (the event contains information on its own schema). This allows them to be
// decoded without needing a separate manifest with event information. The
// implementation here is based on the information found in
// TraceLoggingProvider.h in the Windows SDK, which implements TraceLogging as a
// set of C macros.
package etw

//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go etw.go

//sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister
//sys eventUnregister(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister
//sys eventWriteTransfer(providerHandle providerHandle, descriptor *EventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer
//sys eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation
65	vendor/github.com/Microsoft/go-winio/internal/etw/eventdata.go	(generated, vendored, new file)
@@ -0,0 +1,65 @@
package etw

import (
	"bytes"
	"encoding/binary"
)

// EventData maintains a buffer which builds up the data for an ETW event. It
// needs to be paired with EventMetadata which describes the event.
type EventData struct {
	buffer bytes.Buffer
}

// Bytes returns the raw binary data containing the event data. The returned
// value is not copied from the internal buffer, so it can be mutated by the
// EventData object after it is returned.
func (ed *EventData) Bytes() []byte {
	return ed.buffer.Bytes()
}

// WriteString appends a string, including the null terminator, to the buffer.
func (ed *EventData) WriteString(data string) {
	ed.buffer.WriteString(data)
	ed.buffer.WriteByte(0)
}

// WriteInt8 appends a int8 to the buffer.
func (ed *EventData) WriteInt8(value int8) {
	ed.buffer.WriteByte(uint8(value))
}

// WriteInt16 appends a int16 to the buffer.
func (ed *EventData) WriteInt16(value int16) {
	binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// WriteInt32 appends a int32 to the buffer.
func (ed *EventData) WriteInt32(value int32) {
	binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// WriteInt64 appends a int64 to the buffer.
func (ed *EventData) WriteInt64(value int64) {
	binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// WriteUint8 appends a uint8 to the buffer.
func (ed *EventData) WriteUint8(value uint8) {
	ed.buffer.WriteByte(value)
}

// WriteUint16 appends a uint16 to the buffer.
func (ed *EventData) WriteUint16(value uint16) {
	binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// WriteUint32 appends a uint32 to the buffer.
func (ed *EventData) WriteUint32(value uint32) {
	binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// WriteUint64 appends a uint64 to the buffer.
func (ed *EventData) WriteUint64(value uint64) {
	binary.Write(&ed.buffer, binary.LittleEndian, value)
}
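For orientation (not part of the vendored diff): the internal etw package cannot be imported from outside go-winio, so this standalone sketch simply reproduces the layout the Write* helpers above build up, namely little-endian fixed-width integers and null-terminated strings in a bytes.Buffer.

// Standalone sketch, not part of the vendored code.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// Equivalent of EventData.WriteString("hi"): raw bytes plus a null terminator.
	buf.WriteString("hi")
	buf.WriteByte(0)

	// Equivalent of EventData.WriteUint32(258): little-endian fixed-width integer.
	binary.Write(&buf, binary.LittleEndian, uint32(258))

	fmt.Println(buf.Bytes()) // [104 105 0 2 1 0 0]
}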
29	vendor/github.com/Microsoft/go-winio/internal/etw/eventdatadescriptor.go	(generated, vendored, new file)
@@ -0,0 +1,29 @@
package etw

import (
	"unsafe"
)

type eventDataDescriptorType uint8

const (
	eventDataDescriptorTypeUserData eventDataDescriptorType = iota
	eventDataDescriptorTypeEventMetadata
	eventDataDescriptorTypeProviderMetadata
)

type eventDataDescriptor struct {
	ptr       ptr64
	size      uint32
	dataType  eventDataDescriptorType
	reserved1 uint8
	reserved2 uint16
}

func newEventDataDescriptor(dataType eventDataDescriptorType, buffer []byte) eventDataDescriptor {
	return eventDataDescriptor{
		ptr:      ptr64{ptr: unsafe.Pointer(&buffer[0])},
		size:     uint32(len(buffer)),
		dataType: dataType,
	}
}
Some files were not shown because too many files have changed in this diff.