Container changes (#320)

Added container metrics
Sachin Kumar
2019-03-13 13:31:29 -07:00
committed by Calle Pettersson
parent 517cd3b04b
commit 8841091f9c
388 changed files with 48899 additions and 0 deletions


@@ -11,6 +11,7 @@ clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter
install:
- set PATH=%GOPATH%\bin;%PATH%
- set PATH=%PATH%;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin
- go get -u github.com/prometheus/promu
- go get -u github.com/alecthomas/gometalinter && gometalinter --install
- choco install gitversion.portable make -y

collector/container.go

@@ -0,0 +1,282 @@
// +build windows

package collector
import (
"github.com/Microsoft/hcsshim"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
func init() {
Factories["container"] = NewContainerMetricsCollector
}
// A ContainerMetricsCollector is a Prometheus collector for container metrics
type ContainerMetricsCollector struct {
// Presence
ContainerAvailable *prometheus.Desc
// Number of containers
ContainersCount *prometheus.Desc
// memory
UsageCommitBytes *prometheus.Desc
UsageCommitPeakBytes *prometheus.Desc
UsagePrivateWorkingSetBytes *prometheus.Desc
// CPU
RuntimeTotal *prometheus.Desc
RuntimeUser *prometheus.Desc
RuntimeKernel *prometheus.Desc
// Network
BytesReceived *prometheus.Desc
BytesSent *prometheus.Desc
PacketsReceived *prometheus.Desc
PacketsSent *prometheus.Desc
DroppedPacketsIncoming *prometheus.Desc
DroppedPacketsOutgoing *prometheus.Desc
}
// NewContainerMetricsCollector constructs a new ContainerMetricsCollector
func NewContainerMetricsCollector() (Collector, error) {
const subsystem = "container"
return &ContainerMetricsCollector{
ContainerAvailable: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "available"),
"Available",
[]string{"container_id"},
nil,
),
ContainersCount: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "count"),
"Number of containers",
nil,
nil,
),
UsageCommitBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_bytes"),
"Memory Usage Commit Bytes",
[]string{"container_id"},
nil,
),
UsageCommitPeakBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_peak_bytes"),
"Memory Usage Commit Peak Bytes",
[]string{"container_id"},
nil,
),
UsagePrivateWorkingSetBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_private_working_set_bytes"),
"Memory Usage Private Working Set Bytes",
[]string{"container_id"},
nil,
),
RuntimeTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_total"),
"Total Run time in Seconds",
[]string{"container_id"},
nil,
),
RuntimeUser: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_usermode"),
"Run Time in User mode in Seconds",
[]string{"container_id"},
nil,
),
RuntimeKernel: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_kernelmode"),
"Run time in Kernel mode in Seconds",
[]string{"container_id"},
nil,
),
BytesReceived: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "network_receive_bytes_total"),
"Bytes Received on Interface",
[]string{"container_id", "interface"},
nil,
),
BytesSent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_bytes_total"),
"Bytes Sent on Interface",
[]string{"container_id", "interface"},
nil,
),
PacketsReceived: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_total"),
"Packets Received on Interface",
[]string{"container_id", "interface"},
nil,
),
PacketsSent: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_total"),
"Packets Sent on Interface",
[]string{"container_id", "interface"},
nil,
),
DroppedPacketsIncoming: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_dropped_total"),
"Dropped Incoming Packets on Interface",
[]string{"container_id", "interface"},
nil,
),
DroppedPacketsOutgoing: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_dropped_total"),
"Dropped Outgoing Packets on Interface",
[]string{"container_id", "interface"},
nil,
),
}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *ContainerMetricsCollector) Collect(ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
return err
}
return nil
}
// containerClose closes the container resource
func containerClose(c hcsshim.Container) {
err := c.Close()
if err != nil {
log.Error(err)
}
}
func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
// The query type "Container" restricts the result to container compute systems only
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
if err != nil {
log.Error("Err in Getting containers:", err)
return nil, err
}
count := len(containers)
ch <- prometheus.MustNewConstMetric(
c.ContainersCount,
prometheus.GaugeValue,
float64(count),
)
if count == 0 {
return nil, nil
}
for _, containerDetails := range containers {
containerId := containerDetails.ID
container, err := hcsshim.OpenContainer(containerId)
if container != nil {
defer containerClose(container)
}
if err != nil {
log.Error("err in opening container: ", containerId, err)
continue
}
cstats, err := container.Statistics()
if err != nil {
log.Error("err in fetching container Statistics: ", containerId, err)
continue
}
// HCS V1 is for docker runtime. Add the docker:// prefix on container_id
containerId = "docker://" + containerId
ch <- prometheus.MustNewConstMetric(
c.ContainerAvailable,
prometheus.CounterValue,
1,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.UsageCommitBytes,
prometheus.GaugeValue,
float64(cstats.Memory.UsageCommitBytes),
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.UsageCommitPeakBytes,
prometheus.GaugeValue,
float64(cstats.Memory.UsageCommitPeakBytes),
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.UsagePrivateWorkingSetBytes,
prometheus.GaugeValue,
float64(cstats.Memory.UsagePrivateWorkingSetBytes),
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.RuntimeTotal,
prometheus.CounterValue,
float64(cstats.Processor.TotalRuntime100ns)*ticksToSecondsScaleFactor,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.RuntimeUser,
prometheus.CounterValue,
float64(cstats.Processor.RuntimeUser100ns)*ticksToSecondsScaleFactor,
containerId,
)
ch <- prometheus.MustNewConstMetric(
c.RuntimeKernel,
prometheus.CounterValue,
float64(cstats.Processor.RuntimeKernel100ns)*ticksToSecondsScaleFactor,
containerId,
)
if len(cstats.Network) == 0 {
log.Info("No Network Stats for container: ", containerId)
continue
}
networkStats := cstats.Network
for _, networkInterface := range networkStats {
ch <- prometheus.MustNewConstMetric(
c.BytesReceived,
prometheus.CounterValue,
float64(networkInterface.BytesReceived),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.BytesSent,
prometheus.CounterValue,
float64(networkInterface.BytesSent),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsReceived,
prometheus.CounterValue,
float64(networkInterface.PacketsReceived),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.PacketsSent,
prometheus.CounterValue,
float64(networkInterface.PacketsSent),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.DroppedPacketsIncoming,
prometheus.CounterValue,
float64(networkInterface.DroppedPacketsIncoming),
containerId, networkInterface.EndpointId,
)
ch <- prometheus.MustNewConstMetric(
c.DroppedPacketsOutgoing,
prometheus.CounterValue,
float64(networkInterface.DroppedPacketsOutgoing),
containerId, networkInterface.EndpointId,
)
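// Note: only the first network interface's statistics are reported for each container; the loop exits after one endpoint.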
break
}
}
return nil, nil
}


@@ -0,0 +1,40 @@
# container collector
The container collector exposes metrics about containers running on the system.
|||
-|-
Metric name prefix | `container`
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_container_available` | Available | counter | `container_id`
`wmi_container_count` | Number of containers | gauge | None
`wmi_container_cpu_usage_seconds_kernelmode` | Run time in Kernel mode in Seconds | counter | `container_id`
`wmi_container_cpu_usage_seconds_usermode` | Run Time in User mode in Seconds | counter | `container_id`
`wmi_container_cpu_usage_seconds_total` | Total Run time in Seconds | counter | `container_id`
`wmi_container_memory_usage_commit_bytes` | Memory Usage Commit Bytes | gauge | `container_id`
`wmi_container_memory_usage_commit_peak_bytes` | Memory Usage Commit Peak Bytes | gauge | `container_id`
`wmi_container_memory_usage_private_working_set_bytes` | Memory Usage Private Working Set Bytes | gauge | `container_id`
`wmi_container_network_receive_bytes_total` | Bytes Received on Interface | counter | `container_id`, `interface`
`wmi_container_network_receive_packets_total` | Packets Received on Interface | counter | `container_id`, `interface`
`wmi_container_network_receive_packets_dropped_total` | Dropped Incoming Packets on Interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_bytes_total` | Bytes Sent on Interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_packets_total` | Packets Sent on Interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_packets_dropped_total` | Dropped Outgoing Packets on Interface | counter | `container_id`, `interface`
### Example metric
_wmi_container_network_receive_bytes_total{container_id="docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e",interface="822179E7-002C-4280-ABBA-28BCFE401826"} 9.3305343e+07_
This metric means that a total of _9.3305343e+07_ bytes have been received on interface _822179E7-002C-4280-ABBA-28BCFE401826_ for container _docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e_.
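For reference, here is a minimal Go sketch (not part of this commit) of how a series like the one above is produced: the collector builds a constant counter metric from its Desc, the sampled value, and the `container_id`/`interface` label values, exactly as `collector/container.go` does for each network endpoint. The `wmi` namespace and the IDs are taken from the example metric above.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Desc mirroring ContainerMetricsCollector.BytesReceived.
	bytesReceived := prometheus.NewDesc(
		prometheus.BuildFQName("wmi", "container", "network_receive_bytes_total"),
		"Bytes Received on Interface",
		[]string{"container_id", "interface"},
		nil,
	)

	// One sample, using the container and endpoint IDs from the example above.
	m := prometheus.MustNewConstMetric(
		bytesReceived,
		prometheus.CounterValue,
		9.3305343e+07,
		"docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e",
		"822179E7-002C-4280-ABBA-28BCFE401826",
	)
	fmt.Println(m.Desc())
}
```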
## Useful queries
_This collector does not yet have any useful queries added; we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples; we would appreciate your help adding them!_

vendor/github.com/Microsoft/go-winio/.gitignore (generated, vendored)

@@ -0,0 +1 @@
*.exe

vendor/github.com/Microsoft/go-winio/LICENSE (generated, vendored)

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/Microsoft/go-winio/README.md (generated, vendored)

@@ -0,0 +1,22 @@
# go-winio
This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
for using named pipes as a net transport.
This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
newer operating systems. This is similar to the implementation of network sockets in Go's net
package.
Please see the LICENSE file for licensing information.
This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
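As a rough illustration of the named-pipe transport described above (not part of this commit), the sketch below uses winio.ListenPipe and winio.DialPipe, which expose the pipe as a standard net.Listener/net.Conn; the pipe name and message are made up for the example.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	const pipePath = `\\.\pipe\go-winio-demo` // hypothetical pipe name

	// Serve on the named pipe; it behaves like any other net.Listener.
	l, err := winio.ListenPipe(pipePath, nil)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	go func() {
		conn, err := l.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		conn.Write([]byte("hello over a named pipe"))
	}()

	// Dial the same pipe as a client with a timeout.
	timeout := 5 * time.Second
	conn, err := winio.DialPipe(pipePath, &timeout)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	msg, _ := ioutil.ReadAll(conn)
	fmt.Println(string(msg))
}
```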


@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -0,0 +1,344 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tar implements access to tar archives.
// It aims to cover most of the variations, including those produced
// by GNU and BSD tars.
//
// References:
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
package tar
import (
"bytes"
"errors"
"fmt"
"os"
"path"
"time"
)
const (
blockSize = 512
// Types
TypeReg = '0' // regular file
TypeRegA = '\x00' // regular file
TypeLink = '1' // hard link
TypeSymlink = '2' // symbolic link
TypeChar = '3' // character device node
TypeBlock = '4' // block device node
TypeDir = '5' // directory
TypeFifo = '6' // fifo node
TypeCont = '7' // reserved
TypeXHeader = 'x' // extended header
TypeXGlobalHeader = 'g' // global extended header
TypeGNULongName = 'L' // Next file has a long name
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
TypeGNUSparse = 'S' // sparse file
)
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
Name string // name of header file entry
Mode int64 // permission and mode bits
Uid int // user id of owner
Gid int // group id of owner
Size int64 // length in bytes
ModTime time.Time // modified time
Typeflag byte // type of header entry
Linkname string // target name of link
Uname string // user name of owner
Gname string // group name of owner
Devmajor int64 // major number of character or block device
Devminor int64 // minor number of character or block device
AccessTime time.Time // access time
ChangeTime time.Time // status change time
CreationTime time.Time // creation time
Xattrs map[string]string
Winheaders map[string]string
}
// File name constants from the tar spec.
const (
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
h *Header
}
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{} { return fi.h }
// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
if fi.IsDir() {
return path.Base(path.Clean(fi.h.Name))
}
return path.Base(fi.h.Name)
}
// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
// Set file permission bits.
mode = os.FileMode(fi.h.Mode).Perm()
// Set setuid, setgid and sticky bits.
if fi.h.Mode&c_ISUID != 0 {
// setuid
mode |= os.ModeSetuid
}
if fi.h.Mode&c_ISGID != 0 {
// setgid
mode |= os.ModeSetgid
}
if fi.h.Mode&c_ISVTX != 0 {
// sticky
mode |= os.ModeSticky
}
// Set file mode bits.
// clear perm, setuid, setgid and sticky bits.
m := os.FileMode(fi.h.Mode) &^ 07777
if m == c_ISDIR {
// directory
mode |= os.ModeDir
}
if m == c_ISFIFO {
// named pipe (FIFO)
mode |= os.ModeNamedPipe
}
if m == c_ISLNK {
// symbolic link
mode |= os.ModeSymlink
}
if m == c_ISBLK {
// device file
mode |= os.ModeDevice
}
if m == c_ISCHR {
// Unix character device
mode |= os.ModeDevice
mode |= os.ModeCharDevice
}
if m == c_ISSOCK {
// Unix domain socket
mode |= os.ModeSocket
}
switch fi.h.Typeflag {
case TypeSymlink:
// symbolic link
mode |= os.ModeSymlink
case TypeChar:
// character device node
mode |= os.ModeDevice
mode |= os.ModeCharDevice
case TypeBlock:
// block device node
mode |= os.ModeDevice
case TypeDir:
// directory
mode |= os.ModeDir
case TypeFifo:
// fifo node
mode |= os.ModeNamedPipe
}
return mode
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error
// Mode constants from the tar spec.
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
// Keywords for the PAX Extended Header
const (
paxAtime = "atime"
paxCharset = "charset"
paxComment = "comment"
paxCtime = "ctime" // please note that ctime is not a valid pax header.
paxCreationTime = "LIBARCHIVE.creationtime"
paxGid = "gid"
paxGname = "gname"
paxLinkpath = "linkpath"
paxMtime = "mtime"
paxPath = "path"
paxSize = "size"
paxUid = "uid"
paxUname = "uname"
paxXattr = "SCHILY.xattr."
paxWindows = "MSWINDOWS."
paxNone = ""
)
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("tar: FileInfo is nil")
}
fm := fi.Mode()
h := &Header{
Name: fi.Name(),
ModTime: fi.ModTime(),
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
}
switch {
case fm.IsRegular():
h.Mode |= c_ISREG
h.Typeflag = TypeReg
h.Size = fi.Size()
case fi.IsDir():
h.Typeflag = TypeDir
h.Mode |= c_ISDIR
h.Name += "/"
case fm&os.ModeSymlink != 0:
h.Typeflag = TypeSymlink
h.Mode |= c_ISLNK
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
h.Mode |= c_ISCHR
h.Typeflag = TypeChar
} else {
h.Mode |= c_ISBLK
h.Typeflag = TypeBlock
}
case fm&os.ModeNamedPipe != 0:
h.Typeflag = TypeFifo
h.Mode |= c_ISFIFO
case fm&os.ModeSocket != 0:
h.Mode |= c_ISSOCK
default:
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
}
if fm&os.ModeSetuid != 0 {
h.Mode |= c_ISUID
}
if fm&os.ModeSetgid != 0 {
h.Mode |= c_ISGID
}
if fm&os.ModeSticky != 0 {
h.Mode |= c_ISVTX
}
// If possible, populate additional fields from OS-specific
// FileInfo fields.
if sys, ok := fi.Sys().(*Header); ok {
// This FileInfo came from a Header (not the OS). Use the
// original Header to populate all remaining fields.
h.Uid = sys.Uid
h.Gid = sys.Gid
h.Uname = sys.Uname
h.Gname = sys.Gname
h.AccessTime = sys.AccessTime
h.ChangeTime = sys.ChangeTime
if sys.Xattrs != nil {
h.Xattrs = make(map[string]string)
for k, v := range sys.Xattrs {
h.Xattrs[k] = v
}
}
if sys.Typeflag == TypeLink {
// hard link
h.Typeflag = TypeLink
h.Size = 0
h.Linkname = sys.Linkname
}
}
if sysStat != nil {
return h, sysStat(fi, h)
}
return h, nil
}
var zeroBlock = make([]byte, blockSize)
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both.
func checksum(header []byte) (unsigned int64, signed int64) {
for i := 0; i < len(header); i++ {
if i == 148 {
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
unsigned += ' ' * 8
signed += ' ' * 8
i += 7
continue
}
unsigned += int64(header[i])
signed += int64(int8(header[i]))
}
return
}
type slicer []byte
func (sp *slicer) next(n int) (b []byte) {
s := *sp
b, *sp = s[0:n], s[n:]
return
}
func isASCII(s string) bool {
for _, c := range s {
if c >= 0x80 {
return false
}
}
return true
}
func toASCII(s string) string {
if isASCII(s) {
return s
}
var buf bytes.Buffer
for _, c := range s {
if c < 0x80 {
buf.WriteByte(byte(c))
}
}
return buf.String()
}
// isHeaderOnlyType checks if the given type flag is of the type that has no
// data section even if a size is specified.
func isHeaderOnlyType(flag byte) bool {
switch flag {
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
return true
default:
return false
}
}


@@ -0,0 +1,80 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar_test
import (
"archive/tar"
"bytes"
"fmt"
"io"
"log"
"os"
)
func Example() {
// Create a buffer to write our archive to.
buf := new(bytes.Buffer)
// Create a new tar archive.
tw := tar.NewWriter(buf)
// Add some files to the archive.
var files = []struct {
Name, Body string
}{
{"readme.txt", "This archive contains some text files."},
{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
{"todo.txt", "Get animal handling license."},
}
for _, file := range files {
hdr := &tar.Header{
Name: file.Name,
Mode: 0600,
Size: int64(len(file.Body)),
}
if err := tw.WriteHeader(hdr); err != nil {
log.Fatalln(err)
}
if _, err := tw.Write([]byte(file.Body)); err != nil {
log.Fatalln(err)
}
}
// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
log.Fatalln(err)
}
// Open the tar archive for reading.
r := bytes.NewReader(buf.Bytes())
tr := tar.NewReader(r)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
log.Fatalln(err)
}
fmt.Printf("Contents of %s:\n", hdr.Name)
if _, err := io.Copy(os.Stdout, tr); err != nil {
log.Fatalln(err)
}
fmt.Println()
}
// Output:
// Contents of readme.txt:
// This archive contains some text files.
// Contents of gopher.txt:
// Gopher names:
// George
// Geoffrey
// Gonzo
// Contents of todo.txt:
// Get animal handling license.
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux dragonfly openbsd solaris
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atim.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctim.Unix())
}


@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd netbsd
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atimespec.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctimespec.Unix())
}


@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
package tar
import (
"os"
"syscall"
)
func init() {
sysStat = statUnix
}
func statUnix(fi os.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
// TODO(bradfitz): populate username & group. os/user
// doesn't cache LookupId lookups, and lacks group
// lookup functions.
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)
// TODO(bradfitz): major/minor device numbers?
return nil
}


@@ -0,0 +1,325 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
"testing"
"time"
)
func TestFileInfoHeader(t *testing.T) {
fi, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
h, err := FileInfoHeader(fi, "")
if err != nil {
t.Fatalf("FileInfoHeader: %v", err)
}
if g, e := h.Name, "small.txt"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
t.Errorf("Mode = %#o; want %#o", g, e)
}
if g, e := h.Size, int64(5); g != e {
t.Errorf("Size = %v; want %v", g, e)
}
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
t.Errorf("ModTime = %v; want %v", g, e)
}
// FileInfoHeader should error when passing nil FileInfo
if _, err := FileInfoHeader(nil, ""); err == nil {
t.Fatalf("Expected error when passing nil to FileInfoHeader")
}
}
func TestFileInfoHeaderDir(t *testing.T) {
fi, err := os.Stat("testdata")
if err != nil {
t.Fatal(err)
}
h, err := FileInfoHeader(fi, "")
if err != nil {
t.Fatalf("FileInfoHeader: %v", err)
}
if g, e := h.Name, "testdata/"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
// Ignoring c_ISGID for golang.org/issue/4867
if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
t.Errorf("Mode = %#o; want %#o", g, e)
}
if g, e := h.Size, int64(0); g != e {
t.Errorf("Size = %v; want %v", g, e)
}
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
t.Errorf("ModTime = %v; want %v", g, e)
}
}
func TestFileInfoHeaderSymlink(t *testing.T) {
h, err := FileInfoHeader(symlink{}, "some-target")
if err != nil {
t.Fatal(err)
}
if g, e := h.Name, "some-symlink"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
if g, e := h.Linkname, "some-target"; g != e {
t.Errorf("Linkname = %q; want %q", g, e)
}
}
type symlink struct{}
func (symlink) Name() string { return "some-symlink" }
func (symlink) Size() int64 { return 0 }
func (symlink) Mode() os.FileMode { return os.ModeSymlink }
func (symlink) ModTime() time.Time { return time.Time{} }
func (symlink) IsDir() bool { return false }
func (symlink) Sys() interface{} { return nil }
func TestRoundTrip(t *testing.T) {
data := []byte("some file contents")
var b bytes.Buffer
tw := NewWriter(&b)
hdr := &Header{
Name: "file.txt",
Uid: 1 << 21, // too big for 8 octal digits
Size: int64(len(data)),
ModTime: time.Now(),
}
// tar only supports second precision.
hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("tw.WriteHeader: %v", err)
}
if _, err := tw.Write(data); err != nil {
t.Fatalf("tw.Write: %v", err)
}
if err := tw.Close(); err != nil {
t.Fatalf("tw.Close: %v", err)
}
// Read it back.
tr := NewReader(&b)
rHdr, err := tr.Next()
if err != nil {
t.Fatalf("tr.Next: %v", err)
}
if !reflect.DeepEqual(rHdr, hdr) {
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
}
rData, err := ioutil.ReadAll(tr)
if err != nil {
t.Fatalf("Read: %v", err)
}
if !bytes.Equal(rData, data) {
t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
}
}
type headerRoundTripTest struct {
h *Header
fm os.FileMode
}
func TestHeaderRoundTrip(t *testing.T) {
golden := []headerRoundTripTest{
// regular file.
{
h: &Header{
Name: "test.txt",
Mode: 0644 | c_ISREG,
Size: 12,
ModTime: time.Unix(1360600916, 0),
Typeflag: TypeReg,
},
fm: 0644,
},
// symbolic link.
{
h: &Header{
Name: "link.txt",
Mode: 0777 | c_ISLNK,
Size: 0,
ModTime: time.Unix(1360600852, 0),
Typeflag: TypeSymlink,
},
fm: 0777 | os.ModeSymlink,
},
// character device node.
{
h: &Header{
Name: "dev/null",
Mode: 0666 | c_ISCHR,
Size: 0,
ModTime: time.Unix(1360578951, 0),
Typeflag: TypeChar,
},
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
},
// block device node.
{
h: &Header{
Name: "dev/sda",
Mode: 0660 | c_ISBLK,
Size: 0,
ModTime: time.Unix(1360578954, 0),
Typeflag: TypeBlock,
},
fm: 0660 | os.ModeDevice,
},
// directory.
{
h: &Header{
Name: "dir/",
Mode: 0755 | c_ISDIR,
Size: 0,
ModTime: time.Unix(1360601116, 0),
Typeflag: TypeDir,
},
fm: 0755 | os.ModeDir,
},
// fifo node.
{
h: &Header{
Name: "dev/initctl",
Mode: 0600 | c_ISFIFO,
Size: 0,
ModTime: time.Unix(1360578949, 0),
Typeflag: TypeFifo,
},
fm: 0600 | os.ModeNamedPipe,
},
// setuid.
{
h: &Header{
Name: "bin/su",
Mode: 0755 | c_ISREG | c_ISUID,
Size: 23232,
ModTime: time.Unix(1355405093, 0),
Typeflag: TypeReg,
},
fm: 0755 | os.ModeSetuid,
},
// setgid.
{
h: &Header{
Name: "group.txt",
Mode: 0750 | c_ISREG | c_ISGID,
Size: 0,
ModTime: time.Unix(1360602346, 0),
Typeflag: TypeReg,
},
fm: 0750 | os.ModeSetgid,
},
// sticky.
{
h: &Header{
Name: "sticky.txt",
Mode: 0600 | c_ISREG | c_ISVTX,
Size: 7,
ModTime: time.Unix(1360602540, 0),
Typeflag: TypeReg,
},
fm: 0600 | os.ModeSticky,
},
// hard link.
{
h: &Header{
Name: "hard.txt",
Mode: 0644 | c_ISREG,
Size: 0,
Linkname: "file.txt",
ModTime: time.Unix(1360600916, 0),
Typeflag: TypeLink,
},
fm: 0644,
},
// More information.
{
h: &Header{
Name: "info.txt",
Mode: 0600 | c_ISREG,
Size: 0,
Uid: 1000,
Gid: 1000,
ModTime: time.Unix(1360602540, 0),
Uname: "slartibartfast",
Gname: "users",
Typeflag: TypeReg,
},
fm: 0600,
},
}
for i, g := range golden {
fi := g.h.FileInfo()
h2, err := FileInfoHeader(fi, "")
if err != nil {
t.Error(err)
continue
}
if strings.Contains(fi.Name(), "/") {
t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
}
name := path.Base(g.h.Name)
if fi.IsDir() {
name += "/"
}
if got, want := h2.Name, name; got != want {
t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
}
if got, want := h2.Size, g.h.Size; got != want {
t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
}
if got, want := h2.Uid, g.h.Uid; got != want {
t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
}
if got, want := h2.Gid, g.h.Gid; got != want {
t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
}
if got, want := h2.Uname, g.h.Uname; got != want {
t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
}
if got, want := h2.Gname, g.h.Gname; got != want {
t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
}
if got, want := h2.Linkname, g.h.Linkname; got != want {
t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
}
if got, want := h2.Typeflag, g.h.Typeflag; got != want {
t.Logf("%#v %#v", g.h, fi.Sys())
t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
}
if got, want := h2.Mode, g.h.Mode; got != want {
t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
}
if got, want := fi.Mode(), g.fm; got != want {
t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
}
if got, want := h2.AccessTime, g.h.AccessTime; got != want {
t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
}
if got, want := h2.ChangeTime, g.h.ChangeTime; got != want {
t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
}
if got, want := h2.ModTime, g.h.ModTime; got != want {
t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
}
if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
t.Errorf("i=%d: Sys didn't return original *Header", i)
}
}
}


@@ -0,0 +1,444 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - catch more errors (no first header, etc.)
import (
"bytes"
"errors"
"fmt"
"io"
"path"
"sort"
"strconv"
"strings"
"time"
)
var (
ErrWriteTooLong = errors.New("archive/tar: write too long")
ErrFieldTooLong = errors.New("archive/tar: header field too long")
ErrWriteAfterClose = errors.New("archive/tar: write after close")
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
)
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
// A tar archive consists of a sequence of files.
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
// writing at most hdr.Size bytes in total.
type Writer struct {
w io.Writer
err error
nb int64 // number of unwritten bytes for current file entry
pad int64 // amount of padding to write after current file entry
closed bool
usedBinary bool // whether the binary numeric field extension was used
preferPax bool // use pax header instead of binary numeric header
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}
type formatter struct {
err error // Last error seen
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
// Flush finishes writing the current file (optional).
func (tw *Writer) Flush() error {
if tw.nb > 0 {
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
return tw.err
}
n := tw.nb + tw.pad
for n > 0 && tw.err == nil {
nr := n
if nr > blockSize {
nr = blockSize
}
var nw int
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
n -= int64(nw)
}
tw.nb = 0
tw.pad = 0
return tw.err
}
// Write s into b, terminating it with a NUL if there is room.
func (f *formatter) formatString(b []byte, s string) {
if len(s) > len(b) {
f.err = ErrFieldTooLong
return
}
ascii := toASCII(s)
copy(b, ascii)
if len(ascii) < len(b) {
b[len(ascii)] = 0
}
}
// Encode x as an octal ASCII string and write it into b with leading zeros.
func (f *formatter) formatOctal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// leading zeros, but leave room for a NUL.
for len(s)+1 < len(b) {
s = "0" + s
}
f.formatString(b, s)
}
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
var binBits = uint(n-1) * 8
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}
// Write x into b, as binary (GNUtar/star extension).
func (f *formatter) formatNumeric(b []byte, x int64) {
if fitsInBase256(len(b), x) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // Highest bit indicates binary format
return
}
f.formatOctal(b, 0) // Last resort, just write zero
f.err = ErrFieldTooLong
}
var (
minTime = time.Unix(0, 0)
// There is room for 11 octal digits (33 bits) of mtime.
maxTime = minTime.Add((1<<33 - 1) * time.Second)
)
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
return tw.writeHeader(hdr, true)
}
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
// writeHeader is also called internally by writePAXHeader with allowPax set
// to false, so that writing the PAX header entry itself cannot trigger
// another PAX header.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
if tw.closed {
return ErrWriteAfterClose
}
if tw.err == nil {
tw.Flush()
}
if tw.err != nil {
return tw.err
}
// a map to hold pax header records, if any are needed
paxHeaders := make(map[string]string)
// TODO(shanemhansen): we might want to use PAX headers for
// subsecond time resolution, but for now let's just capture
// too long fields or non ascii characters
var f formatter
var header []byte
// We need to select which scratch buffer to use carefully,
// since this method is called recursively to write PAX headers.
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
// already being used by the non-recursive call, so we must use paxHdrBuff.
header = tw.hdrBuff[:]
if !allowPax {
header = tw.paxHdrBuff[:]
}
copy(header, zeroBlock)
s := slicer(header)
// Wrappers around formatter that automatically sets paxHeaders if the
// argument extends beyond the capacity of the input byte slice.
var formatString = func(b []byte, s string, paxKeyword string) {
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
return
}
f.formatString(b, s)
}
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
// Try octal first.
s := strconv.FormatInt(x, 8)
if len(s) < len(b) {
f.formatOctal(b, x)
return
}
// If it is too long for octal, and PAX is preferred, use a PAX header.
if paxKeyword != paxNone && tw.preferPax {
f.formatOctal(b, 0)
s := strconv.FormatInt(x, 10)
paxHeaders[paxKeyword] = s
return
}
tw.usedBinary = true
f.formatNumeric(b, x)
}
var formatTime = func(b []byte, t time.Time, paxKeyword string) {
var unixTime int64
if !t.Before(minTime) && !t.After(maxTime) {
unixTime = t.Unix()
}
formatNumeric(b, unixTime, paxNone)
// Write a PAX header if the time didn't fit precisely.
if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
paxHeaders[paxKeyword] = formatPAXTime(t)
}
}
// keep a reference to the filename so it can be overwritten later if we detect that we can use ustar longnames instead of pax
pathHeaderBytes := s.next(fileNameSize)
formatString(pathHeaderBytes, hdr.Name, paxPath)
f.formatOctal(s.next(8), hdr.Mode) // 100:108
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148
s.next(8) // chksum (148:156)
s.next(1)[0] = hdr.Typeflag // 156:157
formatString(s.next(100), hdr.Linkname, paxLinkpath)
copy(s.next(8), []byte("ustar\x0000")) // 257:265
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
// keep a reference to the prefix so it can be overwritten later if we detect that we can use ustar longnames instead of pax
prefixHeaderBytes := s.next(155)
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
if tw.usedBinary {
copy(header[257:265], []byte("ustar \x00"))
}
_, paxPathUsed := paxHeaders[paxPath]
// try to use a ustar header when only the name is too long
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
prefix, suffix, ok := splitUSTARPath(hdr.Name)
if ok {
// Since we can encode in USTAR format, disable PAX header.
delete(paxHeaders, paxPath)
// Update the path fields
formatString(pathHeaderBytes, suffix, paxNone)
formatString(prefixHeaderBytes, prefix, paxNone)
}
}
// The chksum field is terminated by a NUL and a space.
// This is different from the other octal fields.
chksum, _ := checksum(header)
f.formatOctal(header[148:155], chksum) // Never fails
header[155] = ' '
// Check if there were any formatting errors.
if f.err != nil {
tw.err = f.err
return tw.err
}
if allowPax {
if !hdr.AccessTime.IsZero() {
paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
}
if !hdr.ChangeTime.IsZero() {
paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
}
if !hdr.CreationTime.IsZero() {
paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
}
for k, v := range hdr.Xattrs {
paxHeaders[paxXattr+k] = v
}
for k, v := range hdr.Winheaders {
paxHeaders[paxWindows+k] = v
}
}
if len(paxHeaders) > 0 {
if !allowPax {
return errInvalidHeader
}
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
return err
}
}
tw.nb = int64(hdr.Size)
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
_, tw.err = tw.w.Write(header)
return tw.err
}
func formatPAXTime(t time.Time) string {
sec := t.Unix()
usec := t.Nanosecond()
s := strconv.FormatInt(sec, 10)
if usec != 0 {
s = fmt.Sprintf("%s.%09d", s, usec)
}
return s
}
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
length := len(name)
if length <= fileNameSize || !isASCII(name) {
return "", "", false
} else if length > fileNamePrefixSize+1 {
length = fileNamePrefixSize + 1
} else if name[length-1] == '/' {
length--
}
i := strings.LastIndex(name[:length], "/")
nlen := len(name) - i - 1 // nlen is length of suffix
plen := i // plen is length of prefix
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
return "", "", false
}
return name[:i], name[i+1:], true
}
// writePaxHeader writes an extended pax header to the
// archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
// Prepare extended header
ext := new(Header)
ext.Typeflag = TypeXHeader
// Setting ModTime is required for reader parsing to
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid. However, this results in differing outputs
// for identical inputs. As such, the constant 0 is now used instead.
// golang.org/issue/12358
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir, "PaxHeaders.0", file)
ascii := toASCII(fullName)
if len(ascii) > 100 {
ascii = ascii[:100]
}
ext.Name = ascii
// Construct the body
var buf bytes.Buffer
// Keys are sorted before writing to body to allow deterministic output.
var keys []string
for k := range paxHeaders {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
}
ext.Size = int64(len(buf.Bytes()))
if err := tw.writeHeader(ext, false); err != nil {
return err
}
if _, err := tw.Write(buf.Bytes()); err != nil {
return err
}
if err := tw.Flush(); err != nil {
return err
}
return nil
}
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
// Final adjustment if adding size field increased the record size.
if len(record) != size {
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
}
return record
}
// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
if tw.closed {
err = ErrWriteAfterClose
return
}
overwrite := false
if int64(len(b)) > tw.nb {
b = b[0:tw.nb]
overwrite = true
}
n, err = tw.w.Write(b)
tw.nb -= int64(n)
if err == nil && overwrite {
err = ErrWriteTooLong
return
}
tw.err = err
return
}
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
if tw.err != nil || tw.closed {
return tw.err
}
tw.Flush()
tw.closed = true
if tw.err != nil {
return tw.err
}
// trailer: two zero blocks
for i := 0; i < 2; i++ {
_, tw.err = tw.w.Write(zeroBlock)
if tw.err != nil {
break
}
}
return tw.err
}


@@ -0,0 +1,739 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"reflect"
"sort"
"strings"
"testing"
"testing/iotest"
"time"
)
type writerTestEntry struct {
header *Header
contents string
}
type writerTest struct {
file string // filename of expected output
entries []*writerTestEntry
}
var writerTests = []*writerTest{
// The writer test file was produced with this command:
// tar (GNU tar) 1.26
// ln -s small.txt link.txt
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
{
file: "testdata/writer.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "small.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 5,
ModTime: time.Unix(1246508266, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Kilts",
},
{
header: &Header{
Name: "small2.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 11,
ModTime: time.Unix(1245217492, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Google.com\n",
},
{
header: &Header{
Name: "link.txt",
Mode: 0777,
Uid: 1000,
Gid: 1000,
Size: 0,
ModTime: time.Unix(1314603082, 0),
Typeflag: '2',
Linkname: "small.txt",
Uname: "strings",
Gname: "strings",
},
// no contents
},
},
},
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
{
file: "testdata/writer-big.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "tmp/16gig.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 16 << 30,
ModTime: time.Unix(1254699560, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
},
},
},
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
// tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
{
file: "testdata/writer-big-long.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: strings.Repeat("longname/", 15) + "16gig.txt",
Mode: 0644,
Uid: 1000,
Gid: 1000,
Size: 16 << 30,
ModTime: time.Unix(1399583047, 0),
Typeflag: '0',
Uname: "guillaume",
Gname: "guillaume",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
},
},
},
// This file was produced using gnu tar 1.17
// gnutar -b 4 --format=ustar (longname/)*15 + file.txt
{
file: "testdata/ustar.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: strings.Repeat("longname/", 15) + "file.txt",
Mode: 0644,
Uid: 0765,
Gid: 024,
Size: 06,
ModTime: time.Unix(1360135598, 0),
Typeflag: '0',
Uname: "shane",
Gname: "staff",
},
contents: "hello\n",
},
},
},
// This file was produced using gnu tar 1.26
// echo "Slartibartfast" > file.txt
// ln file.txt hard.txt
// tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
{
file: "testdata/hardlink.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "file.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 15,
ModTime: time.Unix(1425484303, 0),
Typeflag: '0',
Uname: "vbatts",
Gname: "users",
},
contents: "Slartibartfast\n",
},
{
header: &Header{
Name: "hard.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 0,
ModTime: time.Unix(1425484303, 0),
Typeflag: '1',
Linkname: "file.txt",
Uname: "vbatts",
Gname: "users",
},
// no contents
},
},
},
}
// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
func bytestr(offset int, b []byte) string {
const rowLen = 32
s := fmt.Sprintf("%04x ", offset)
for _, ch := range b {
switch {
case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
s += fmt.Sprintf(" %c", ch)
default:
s += fmt.Sprintf(" %02x", ch)
}
}
return s
}
// Render a pseudo-diff between two blocks of bytes.
func bytediff(a []byte, b []byte) string {
const rowLen = 32
s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
for offset := 0; len(a)+len(b) > 0; offset += rowLen {
na, nb := rowLen, rowLen
if na > len(a) {
na = len(a)
}
if nb > len(b) {
nb = len(b)
}
sa := bytestr(offset, a[0:na])
sb := bytestr(offset, b[0:nb])
if sa != sb {
s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
}
a = a[na:]
b = b[nb:]
}
return s
}
func TestWriter(t *testing.T) {
testLoop:
for i, test := range writerTests {
expected, err := ioutil.ReadFile(test.file)
if err != nil {
t.Errorf("test %d: Unexpected error: %v", i, err)
continue
}
buf := new(bytes.Buffer)
tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
big := false
for j, entry := range test.entries {
big = big || entry.header.Size > 1<<10
if err := tw.WriteHeader(entry.header); err != nil {
t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
continue testLoop
}
if _, err := io.WriteString(tw, entry.contents); err != nil {
t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
continue testLoop
}
}
// Only interested in Close failures for the small tests.
if err := tw.Close(); err != nil && !big {
t.Errorf("test %d: Failed closing archive: %v", i, err)
continue testLoop
}
actual := buf.Bytes()
if !bytes.Equal(expected, actual) {
t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
i, bytediff(expected, actual))
}
if testing.Short() { // The second test is expensive.
break
}
}
}
func TestPax(t *testing.T) {
// Create an archive with a large name
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
// Force a PAX long name to be written
longName := strings.Repeat("ab", 100)
contents := strings.Repeat(" ", int(hdr.Size))
hdr.Name = longName
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != longName {
t.Fatal("Couldn't recover long file name")
}
}
func TestPaxSymlink(t *testing.T) {
// Create an archive with a large linkname
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeSymlink
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// Force a PAX long linkname to be written
longLinkname := strings.Repeat("1234567890/1234567890", 10)
hdr.Linkname = longLinkname
hdr.Size = 0
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Linkname != longLinkname {
t.Fatal("Couldn't recover long link name")
}
}
func TestPaxNonAscii(t *testing.T) {
// Create an archive with non ascii. These should trigger a pax header
// because pax headers have a defined utf-8 encoding.
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// some sample data
chineseFilename := "文件名"
chineseGroupname := "組"
chineseUsername := "用戶名"
hdr.Name = chineseFilename
hdr.Gname = chineseGroupname
hdr.Uname = chineseUsername
contents := strings.Repeat(" ", int(hdr.Size))
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != chineseFilename {
t.Fatal("Couldn't recover unicode name")
}
if hdr.Gname != chineseGroupname {
t.Fatal("Couldn't recover unicode group")
}
if hdr.Uname != chineseUsername {
t.Fatal("Couldn't recover unicode user")
}
}
func TestPaxXattrs(t *testing.T) {
xattrs := map[string]string{
"user.key": "value",
}
// Create an archive with an xattr
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
contents := "Kilts"
hdr.Xattrs = xattrs
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Test that we can get the xattrs back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
hdr.Xattrs, xattrs)
}
}
func TestPaxHeadersSorted(t *testing.T) {
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
contents := strings.Repeat(" ", int(hdr.Size))
hdr.Xattrs = map[string]string{
"foo": "foo",
"bar": "bar",
"baz": "baz",
"qux": "qux",
}
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// xattr bar should always appear before others
indices := []int{
bytes.Index(buf.Bytes(), []byte("bar=bar")),
bytes.Index(buf.Bytes(), []byte("baz=baz")),
bytes.Index(buf.Bytes(), []byte("foo=foo")),
bytes.Index(buf.Bytes(), []byte("qux=qux")),
}
if !sort.IntsAreSorted(indices) {
t.Fatal("PAX headers are not sorted")
}
}
func TestUSTARLongName(t *testing.T) {
// Create an archive with a path that failed to split with USTAR extension in previous versions.
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeDir
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// Force a PAX long name to be written. The name was taken from a practical example
// that failed, with every character replaced by numbers to anonymize the sample.
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
hdr.Name = longName
hdr.Size = 0
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != longName {
t.Fatal("Couldn't recover long name")
}
}
func TestValidTypeflagWithPAXHeader(t *testing.T) {
var buffer bytes.Buffer
tw := NewWriter(&buffer)
fileName := strings.Repeat("ab", 100)
hdr := &Header{
Name: fileName,
Size: 4,
Typeflag: 0,
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("Failed to write header: %s", err)
}
if _, err := tw.Write([]byte("fooo")); err != nil {
t.Fatalf("Failed to write the file's data: %s", err)
}
tw.Close()
tr := NewReader(&buffer)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("Failed to read header: %s", err)
}
if header.Typeflag != 0 {
t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
}
}
}
func TestWriteAfterClose(t *testing.T) {
var buffer bytes.Buffer
tw := NewWriter(&buffer)
hdr := &Header{
Name: "small.txt",
Size: 5,
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("Failed to write header: %s", err)
}
tw.Close()
if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
}
}
func TestSplitUSTARPath(t *testing.T) {
var sr = strings.Repeat
var vectors = []struct {
input string // Input path
prefix string // Expected output prefix
suffix string // Expected output suffix
ok bool // Split success?
}{
{"", "", "", false},
{"abc", "", "", false},
{"用戶名", "", "", false},
{sr("a", fileNameSize), "", "", false},
{sr("a", fileNameSize) + "/", "", "", false},
{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
{sr("a", fileNamePrefixSize) + "/", "", "", false},
{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
{sr("a", fileNameSize+1), "", "", false},
{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
}
for _, v := range vectors {
prefix, suffix, ok := splitUSTARPath(v.input)
if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
}
}
}
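// A PAX extended-header record has the form "<length> <key>=<value>\n", where
// <length> is the decimal byte count of the whole record including the length
// digits themselves. As a worked example of the vectors below: for key "foo"
// and value "bar" the payload " foo=bar\n" is 9 bytes, and the two length
// digits bring the total to 11, giving "11 foo=bar\n".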
func TestFormatPAXRecord(t *testing.T) {
var medName = strings.Repeat("CD", 50)
var longName = strings.Repeat("AB", 100)
var vectors = []struct {
inputKey string
inputVal string
output string
}{
{"k", "v", "6 k=v\n"},
{"path", "/etc/hosts", "19 path=/etc/hosts\n"},
{"path", longName, "210 path=" + longName + "\n"},
{"path", medName, "110 path=" + medName + "\n"},
{"foo", "ba", "9 foo=ba\n"},
{"foo", "bar", "11 foo=bar\n"},
{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
}
for _, v := range vectors {
output := formatPAXRecord(v.inputKey, v.inputVal)
if output != v.output {
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
v.inputKey, v.inputVal, output, v.output)
}
}
}
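// The vectors below reflect the base-256 capacity rule: an n-byte field can
// hold signed values in [-2^(8(n-1)), 2^(8(n-1))-1] (the leading byte is
// largely taken up by the flag and sign bits), and any int64 fits once the
// width reaches 9 bytes. Hence (1<<56)-1 fits in 8 bytes while 1<<56 does not.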
func TestFitsInBase256(t *testing.T) {
var vectors = []struct {
input int64
width int
ok bool
}{
{+1, 8, true},
{0, 8, true},
{-1, 8, true},
{1 << 56, 8, false},
{(1 << 56) - 1, 8, true},
{-1 << 56, 8, true},
{(-1 << 56) - 1, 8, false},
{121654, 8, true},
{-9849849, 8, true},
{math.MaxInt64, 9, true},
{0, 9, true},
{math.MinInt64, 9, true},
{math.MaxInt64, 12, true},
{0, 12, true},
{math.MinInt64, 12, true},
}
for _, v := range vectors {
ok := fitsInBase256(v.width, v.input)
if ok != v.ok {
t.Errorf("checkNumeric(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok)
}
}
}
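// In the base-256 (binary) form checked below, the value is stored big-endian
// in two's complement with the top bit of the leading byte set as the binary
// flag: 255 in a two-byte field becomes "\x80\xff", -256 becomes "\xff\x00",
// and -1 fills every byte with 0xff. Vectors whose expected output is a run
// of '0' digits are values that do not fit in the given width.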
func TestFormatNumeric(t *testing.T) {
var vectors = []struct {
input int64
output string
ok bool
}{
// Test base-256 (binary) encoded values.
{-1, "\xff", true},
{-1, "\xff\xff", true},
{-1, "\xff\xff\xff", true},
{(1 << 0), "0", false},
{(1 << 8) - 1, "\x80\xff", true},
{(1 << 8), "0\x00", false},
{(1 << 16) - 1, "\x80\xff\xff", true},
{(1 << 16), "00\x00", false},
{-1 * (1 << 0), "\xff", true},
{-1*(1<<0) - 1, "0", false},
{-1 * (1 << 8), "\xff\x00", true},
{-1*(1<<8) - 1, "0\x00", false},
{-1 * (1 << 16), "\xff\x00\x00", true},
{-1*(1<<16) - 1, "00\x00", false},
{537795476381659745, "0000000\x00", false},
{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
{-615126028225187231, "0000000\x00", false},
{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
{math.MaxInt64, "0000000\x00", false},
{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "0000000\x00", false},
{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
}
for _, v := range vectors {
var f formatter
output := make([]byte, len(v.output))
f.formatNumeric(output, v.input)
ok := (f.err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input)
} else {
t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input)
}
}
if string(output) != v.output {
t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output)
}
}
}
func TestFormatPAXTime(t *testing.T) {
t1 := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC)
t2 := time.Date(2000, 1, 1, 11, 0, 0, 100, time.UTC)
t3 := time.Date(1960, 1, 1, 11, 0, 0, 0, time.UTC)
t4 := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
verify := func(time time.Time, s string) {
p := formatPAXTime(time)
if p != s {
t.Errorf("for %v, expected %s, got %s", time, s, p)
}
}
verify(t1, "946724400")
verify(t2, "946724400.000000100")
verify(t3, "-315579600")
verify(t4, "0")
}

280
vendor/github.com/Microsoft/go-winio/backup.go generated vendored Normal file
View File

@@ -0,0 +1,280 @@
// +build windows
package winio
import (
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"syscall"
"unicode/utf16"
)
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
const (
BackupData = uint32(iota + 1)
BackupEaData
BackupSecurity
BackupAlternateData
BackupLink
BackupPropertyData
BackupObjectId
BackupReparseData
BackupSparseBlock
BackupTxfsData
)
const (
StreamSparseAttributes = uint32(8)
)
const (
WRITE_DAC = 0x40000
WRITE_OWNER = 0x80000
ACCESS_SYSTEM_SECURITY = 0x1000000
)
// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
Id uint32 // The backup stream ID
Attributes uint32 // Stream attributes
Size int64 // The size of the stream in bytes
Name string // The name of the stream (for BackupAlternateData only).
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
}
type win32StreamId struct {
StreamId uint32
Attributes uint32
Size uint64
NameSize uint32
}
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
// of BackupHeader values.
type BackupStreamReader struct {
r io.Reader
bytesLeft int64
}
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
return &BackupStreamReader{r, 0}
}
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
if r.bytesLeft > 0 {
if s, ok := r.r.(io.Seeker); ok {
// Probe whether seeking actually works (some types implement io.Seeker
// but fail at runtime) before attempting the real skip.
if _, err := s.Seek(0, io.SeekCurrent); err == nil {
if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
return nil, err
}
r.bytesLeft = 0
}
}
if _, err := io.Copy(ioutil.Discard, r); err != nil {
return nil, err
}
}
var wsi win32StreamId
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
return nil, err
}
hdr := &BackupHeader{
Id: wsi.StreamId,
Attributes: wsi.Attributes,
Size: int64(wsi.Size),
}
if wsi.NameSize != 0 {
name := make([]uint16, int(wsi.NameSize/2))
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
return nil, err
}
hdr.Name = syscall.UTF16ToString(name)
}
if wsi.StreamId == BackupSparseBlock {
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
return nil, err
}
hdr.Size -= 8
}
r.bytesLeft = hdr.Size
return hdr, nil
}
// Read reads from the current backup stream.
func (r *BackupStreamReader) Read(b []byte) (int, error) {
if r.bytesLeft == 0 {
return 0, io.EOF
}
if int64(len(b)) > r.bytesLeft {
b = b[:r.bytesLeft]
}
n, err := r.r.Read(b)
r.bytesLeft -= int64(n)
if err == io.EOF {
err = io.ErrUnexpectedEOF
} else if r.bytesLeft == 0 && err == nil {
err = io.EOF
}
return n, err
}
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
type BackupStreamWriter struct {
w io.Writer
bytesLeft int64
}
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
return &BackupStreamWriter{w, 0}
}
// WriteHeader writes the next backup stream header and prepares for calls to Write().
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
if w.bytesLeft != 0 {
return fmt.Errorf("missing %d bytes", w.bytesLeft)
}
name := utf16.Encode([]rune(hdr.Name))
wsi := win32StreamId{
StreamId: hdr.Id,
Attributes: hdr.Attributes,
Size: uint64(hdr.Size),
NameSize: uint32(len(name) * 2),
}
if hdr.Id == BackupSparseBlock {
// Include space for the int64 block offset
wsi.Size += 8
}
if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
return err
}
if len(name) != 0 {
if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
return err
}
}
if hdr.Id == BackupSparseBlock {
if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
return err
}
}
w.bytesLeft = hdr.Size
return nil
}
// Write writes to the current backup stream.
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
if w.bytesLeft < int64(len(b)) {
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
}
n, err := w.w.Write(b)
w.bytesLeft -= int64(n)
return n, err
}
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
type BackupFileReader struct {
f *os.File
includeSecurity bool
ctx uintptr
}
// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
// Read will attempt to read the security descriptor of the file.
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
r := &BackupFileReader{f, includeSecurity, 0}
return r
}
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
func (r *BackupFileReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
if err != nil {
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
}
runtime.KeepAlive(r.f)
if bytesRead == 0 {
return 0, io.EOF
}
return int(bytesRead), nil
}
// Close frees Win32 resources associated with the BackupFileReader. It does not close
// the underlying file.
func (r *BackupFileReader) Close() error {
if r.ctx != 0 {
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
runtime.KeepAlive(r.f)
r.ctx = 0
}
return nil
}
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
type BackupFileWriter struct {
f *os.File
includeSecurity bool
ctx uintptr
}
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
w := &BackupFileWriter{f, includeSecurity, 0}
return w
}
// Write restores a portion of the file using the provided backup stream.
func (w *BackupFileWriter) Write(b []byte) (int, error) {
var bytesWritten uint32
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
if err != nil {
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
}
runtime.KeepAlive(w.f)
if int(bytesWritten) != len(b) {
return int(bytesWritten), errors.New("not all bytes could be written")
}
return len(b), nil
}
// Close frees Win32 resources associated with the BackupFileWriter. It does not
// close the underlying file.
func (w *BackupFileWriter) Close() error {
if w.ctx != 0 {
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
runtime.KeepAlive(w.f)
w.ctx = 0
}
return nil
}
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
// or restore privileges have been acquired.
//
// If the file opened was a directory, it cannot be used with Readdir().
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
winPath, err := syscall.UTF16FromString(path)
if err != nil {
return nil, err
}
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
if err != nil {
err = &os.PathError{Op: "open", Path: path, Err: err}
return nil, err
}
return os.NewFile(uintptr(h), path), nil
}
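// enumerateStreams is a hypothetical sketch (not part of the original file)
// showing how the pieces above compose: open the file with backup semantics,
// wrap it in a BackupFileReader, and walk the raw stream with a
// BackupStreamReader. The access/share/createmode constants are ordinary
// syscall values chosen for illustration.
func enumerateStreams(path string) error {
f, err := OpenForBackup(path, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
if err != nil {
return err
}
defer f.Close()
r := NewBackupFileReader(f, false) // false: skip the security descriptor
defer r.Close()
br := NewBackupStreamReader(r)
for {
hdr, err := br.Next()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
// Report each stream; its data is skipped by the next call to Next().
fmt.Printf("stream %d (%q): %d bytes\n", hdr.Id, hdr.Name, hdr.Size)
}
}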

255
vendor/github.com/Microsoft/go-winio/backup_test.go generated vendored Normal file
View File

@@ -0,0 +1,255 @@
package winio
import (
"io"
"io/ioutil"
"os"
"syscall"
"testing"
)
var testFileName string
func TestMain(m *testing.M) {
f, err := ioutil.TempFile("", "tmp")
if err != nil {
panic(err)
}
testFileName = f.Name()
f.Close()
defer os.Remove(testFileName)
os.Exit(m.Run())
}
func makeTestFile(makeADS bool) error {
os.Remove(testFileName)
f, err := os.Create(testFileName)
if err != nil {
return err
}
defer f.Close()
_, err = f.Write([]byte("testing 1 2 3\n"))
if err != nil {
return err
}
if makeADS {
a, err := os.Create(testFileName + ":ads.txt")
if err != nil {
return err
}
defer a.Close()
_, err = a.Write([]byte("alternate data stream\n"))
if err != nil {
return err
}
}
return nil
}
func TestBackupRead(t *testing.T) {
err := makeTestFile(true)
if err != nil {
t.Fatal(err)
}
f, err := os.Open(testFileName)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := NewBackupFileReader(f, false)
defer r.Close()
b, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err)
}
if len(b) == 0 {
t.Fatal("no data")
}
}
func TestBackupStreamRead(t *testing.T) {
err := makeTestFile(true)
if err != nil {
t.Fatal(err)
}
f, err := os.Open(testFileName)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := NewBackupFileReader(f, false)
defer r.Close()
br := NewBackupStreamReader(r)
gotData := false
gotAltData := false
for {
hdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
switch hdr.Id {
case BackupData:
if gotData {
t.Fatal("duplicate data")
}
if hdr.Name != "" {
t.Fatalf("unexpected name %s", hdr.Name)
}
b, err := ioutil.ReadAll(br)
if err != nil {
t.Fatal(err)
}
if string(b) != "testing 1 2 3\n" {
t.Fatalf("incorrect data %v", b)
}
gotData = true
case BackupAlternateData:
if gotAltData {
t.Fatal("duplicate alt data")
}
if hdr.Name != ":ads.txt:$DATA" {
t.Fatalf("incorrect name %s", hdr.Name)
}
b, err := ioutil.ReadAll(br)
if err != nil {
t.Fatal(err)
}
if string(b) != "alternate data stream\n" {
t.Fatalf("incorrect data %v", b)
}
gotAltData = true
default:
t.Fatalf("unknown stream ID %d", hdr.Id)
}
}
if !gotData || !gotAltData {
t.Fatal("missing stream")
}
}
func TestBackupStreamWrite(t *testing.T) {
f, err := os.Create(testFileName)
if err != nil {
t.Fatal(err)
}
defer f.Close()
w := NewBackupFileWriter(f, false)
defer w.Close()
data := "testing 1 2 3\n"
altData := "alternate stream\n"
br := NewBackupStreamWriter(w)
err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))})
if err != nil {
t.Fatal(err)
}
n, err := br.Write([]byte(data))
if err != nil {
t.Fatal(err)
}
if n != len(data) {
t.Fatal("short write")
}
err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"})
if err != nil {
t.Fatal(err)
}
n, err = br.Write([]byte(altData))
if err != nil {
t.Fatal(err)
}
if n != len(altData) {
t.Fatal("short write")
}
f.Close()
b, err := ioutil.ReadFile(testFileName)
if err != nil {
t.Fatal(err)
}
if string(b) != data {
t.Fatalf("wrong data %v", b)
}
b, err = ioutil.ReadFile(testFileName + ":ads.txt")
if err != nil {
t.Fatal(err)
}
if string(b) != altData {
t.Fatalf("wrong data %v", b)
}
}
func makeSparseFile() error {
os.Remove(testFileName)
f, err := os.Create(testFileName)
if err != nil {
return err
}
defer f.Close()
const (
FSCTL_SET_SPARSE = 0x000900c4
FSCTL_SET_ZERO_DATA = 0x000980c8
)
err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil)
if err != nil {
return err
}
_, err = f.Write([]byte("testing 1 2 3\n"))
if err != nil {
return err
}
_, err = f.Seek(1000000, 0)
if err != nil {
return err
}
_, err = f.Write([]byte("more data later\n"))
if err != nil {
return err
}
return nil
}
func TestBackupSparseFile(t *testing.T) {
err := makeSparseFile()
if err != nil {
t.Fatal(err)
}
f, err := os.Open(testFileName)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := NewBackupFileReader(f, false)
defer r.Close()
br := NewBackupStreamReader(r)
for {
hdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
t.Log(hdr)
}
}

View File

@@ -0,0 +1,4 @@
// +build !windows
// This file only exists to allow go get on non-Windows platforms.
package backuptar

439
vendor/github.com/Microsoft/go-winio/backuptar/tar.go generated vendored Normal file
View File

@@ -0,0 +1,439 @@
// +build windows
package backuptar
import (
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
)
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
const (
hdrFileAttributes = "fileattr"
hdrSecurityDescriptor = "sd"
hdrRawSecurityDescriptor = "rawsd"
hdrMountPoint = "mountpoint"
hdrEaPrefix = "xattr."
)
func writeZeroes(w io.Writer, count int64) error {
buf := make([]byte, 8192)
c := len(buf)
for i := int64(0); i < count; i += int64(c) {
if int64(c) > count-i {
c = int(count - i)
}
_, err := w.Write(buf[:c])
if err != nil {
return err
}
}
return nil
}
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
curOffset := int64(0)
for {
bhdr, err := br.Next()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
return err
}
if bhdr.Id != winio.BackupSparseBlock {
return fmt.Errorf("unexpected stream %d", bhdr.Id)
}
// archive/tar does not support writing sparse files
// so just write zeroes to catch up to the current offset.
err = writeZeroes(t, bhdr.Offset-curOffset)
if err != nil {
return err
}
if bhdr.Size == 0 {
break
}
n, err := io.Copy(t, br)
if err != nil {
return err
}
curOffset = bhdr.Offset + n
}
return nil
}
// BasicInfoHeader creates a tar header from basic file information.
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
hdr := &tar.Header{
Name: filepath.ToSlash(name),
Size: size,
Typeflag: tar.TypeReg,
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
Winheaders: make(map[string]string),
}
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= c_ISDIR
hdr.Size = 0
hdr.Typeflag = tar.TypeDir
}
return hdr
}
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
//
// The additional Win32 metadata is:
//
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
//
// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
//
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
name = filepath.ToSlash(name)
hdr := BasicInfoHeader(name, size, fileInfo)
// If r supports seeking, this function makes two passes: pass 1 collects the
// tar header data, and pass 2 copies the data stream. If r cannot seek,
// some header data (in particular EAs) will be silently lost.
var (
restartPos int64
err error
)
sr, readTwice := r.(io.Seeker)
if readTwice {
if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
readTwice = false
}
}
br := winio.NewBackupStreamReader(r)
var dataHdr *winio.BackupHeader
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupData:
hdr.Mode |= c_ISREG
if !readTwice {
dataHdr = bhdr
}
case winio.BackupSecurity:
sd, err := ioutil.ReadAll(br)
if err != nil {
return err
}
hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
case winio.BackupReparseData:
hdr.Mode |= c_ISLNK
hdr.Typeflag = tar.TypeSymlink
reparseBuffer, err := ioutil.ReadAll(br)
if err != nil {
return err
}
rp, err := winio.DecodeReparsePoint(reparseBuffer)
if err != nil {
return err
}
if rp.IsMountPoint {
hdr.Winheaders[hdrMountPoint] = "1"
}
hdr.Linkname = rp.Target
case winio.BackupEaData:
eab, err := ioutil.ReadAll(br)
if err != nil {
return err
}
eas, err := winio.DecodeExtendedAttributes(eab)
if err != nil {
return err
}
for _, ea := range eas {
// Use base64 encoding for the binary value. Note that there
// is no way to encode the EA's flags, since their use doesn't
// make any sense for persisted EAs.
hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
}
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
}
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
if readTwice {
// Get back to the data stream.
if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
return err
}
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if bhdr.Id == winio.BackupData {
dataHdr = bhdr
}
}
}
if dataHdr != nil {
// A data stream was found. Copy the data.
if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
if size != dataHdr.Size {
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
err = copySparse(t, br)
if err != nil {
return err
}
}
}
// Look for streams after the data stream. The only ones we handle are alternate data streams.
// Other streams may have metadata that could be serialized, but the tar header has already
// been written. In practice, this means that we don't get EA or TXF metadata.
for {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupAlternateData:
altName := bhdr.Name
if strings.HasSuffix(altName, ":$DATA") {
altName = altName[:len(altName)-len(":$DATA")]
}
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
hdr = &tar.Header{
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
Size: bhdr.Size,
ModTime: hdr.ModTime,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
// Unsupported for now, since the size of the alternate stream is not present
// in the backup stream until after the data has been read.
return errors.New("tar of sparse alternate data streams is unsupported")
}
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
}
}
return nil
}
// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
// WriteTarFileFromBackupStream.
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
name = hdr.Name
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
}
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
}
fileInfo.FileAttributes = uint32(attr)
} else {
if hdr.Typeflag == tar.TypeDir {
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
return
}
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar file header that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
var sd []byte
var err error
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
// by this library will have raw binary for the security descriptor.
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
sd, err = winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
}
if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
sd, err = base64.StdEncoding.DecodeString(sdraw)
if err != nil {
return nil, err
}
}
if len(sd) != 0 {
bhdr := winio.BackupHeader{
Id: winio.BackupSecurity,
Size: int64(len(sd)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(sd)
if err != nil {
return nil, err
}
}
var eas []winio.ExtendedAttribute
for k, v := range hdr.Winheaders {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
}
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return nil, err
}
eas = append(eas, winio.ExtendedAttribute{
Name: k[len(hdrEaPrefix):],
Value: data,
})
}
if len(eas) != 0 {
eadata, err := winio.EncodeExtendedAttributes(eas)
if err != nil {
return nil, err
}
bhdr := winio.BackupHeader{
Id: winio.BackupEaData,
Size: int64(len(eadata)),
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(eadata)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeSymlink {
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
rp := winio.ReparsePoint{
Target: filepath.FromSlash(hdr.Linkname),
IsMountPoint: isMountPoint,
}
reparse := winio.EncodeReparsePoint(&rp)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(reparse)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
Size: hdr.Size,
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
// Copy all the alternate data streams and return the next non-ADS header.
for {
ahdr, err := t.Next()
if err != nil {
return nil, err
}
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
return ahdr, nil
}
bhdr := winio.BackupHeader{
Id: winio.BackupAlternateData,
Size: ahdr.Size,
Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
}
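// restoreFromTar is a hypothetical sketch (not part of the original file)
// showing how WriteBackupStreamFromTarFile is meant to be driven: a single
// logical file may span several tar entries (one per alternate data stream),
// so the function returns the next unprocessed header, which becomes the
// input to the following iteration. The open callback is an assumption that
// stands in for however the caller creates the target files.
func restoreFromTar(t *tar.Reader, open func(name string) (io.WriteCloser, error)) error {
hdr, err := t.Next()
for err == nil {
name, _, info, herr := FileInfoFromHeader(hdr)
if herr != nil {
return herr
}
_ = info // a full restore would also apply this via winio.SetFileBasicInfo
w, oerr := open(name)
if oerr != nil {
return oerr
}
// hdr advances past any alternate-data-stream entries for the same file;
// io.EOF from this call ends the loop.
hdr, err = WriteBackupStreamFromTarFile(w, t, hdr)
if cerr := w.Close(); cerr != nil {
return cerr
}
}
if err == io.EOF {
return nil
}
return err
}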

View File

@@ -0,0 +1,84 @@
package backuptar
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar"
)
func ensurePresent(t *testing.T, m map[string]string, keys ...string) {
for _, k := range keys {
if _, ok := m[k]; !ok {
t.Error(k, "not present in tar header")
}
}
}
func TestRoundTrip(t *testing.T) {
f, err := ioutil.TempFile("", "tst")
if err != nil {
t.Fatal(err)
}
defer f.Close()
defer os.Remove(f.Name())
if _, err = f.Write([]byte("testing 1 2 3\n")); err != nil {
t.Fatal(err)
}
if _, err = f.Seek(0, 0); err != nil {
t.Fatal(err)
}
fi, err := f.Stat()
if err != nil {
t.Fatal(err)
}
bi, err := winio.GetFileBasicInfo(f)
if err != nil {
t.Fatal(err)
}
br := winio.NewBackupFileReader(f, true)
defer br.Close()
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
err = WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi)
if err != nil {
t.Fatal(err)
}
tr := tar.NewReader(&buf)
hdr, err := tr.Next()
if err != nil {
t.Fatal(err)
}
name, size, bi2, err := FileInfoFromHeader(hdr)
if err != nil {
t.Fatal(err)
}
if name != filepath.ToSlash(f.Name()) {
t.Errorf("got name %s, expected %s", name, filepath.ToSlash(f.Name()))
}
if size != fi.Size() {
t.Errorf("got size %d, expected %d", size, fi.Size())
}
if !reflect.DeepEqual(*bi, *bi2) {
t.Errorf("got %#v, expected %#v", *bi, *bi2)
}
ensurePresent(t, hdr.Winheaders, "fileattr", "rawsd")
}

137
vendor/github.com/Microsoft/go-winio/ea.go generated vendored Normal file
View File

@@ -0,0 +1,137 @@
package winio
import (
"bytes"
"encoding/binary"
"errors"
)
type fileFullEaInformation struct {
NextEntryOffset uint32
Flags uint8
NameLength uint8
ValueLength uint16
}
var (
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
errEaNameTooLarge = errors.New("extended attribute name too large")
errEaValueTooLarge = errors.New("extended attribute value too large")
)
// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
Name string
Value []byte
Flags uint8
}
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
var info fileFullEaInformation
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
if err != nil {
err = errInvalidEaBuffer
return
}
nameOffset := fileFullEaInformationSize
nameLen := int(info.NameLength)
valueOffset := nameOffset + int(info.NameLength) + 1
valueLen := int(info.ValueLength)
nextOffset := int(info.NextEntryOffset)
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
err = errInvalidEaBuffer
return
}
ea.Name = string(b[nameOffset : nameOffset+nameLen])
ea.Value = b[valueOffset : valueOffset+valueLen]
ea.Flags = info.Flags
if info.NextEntryOffset != 0 {
nb = b[info.NextEntryOffset:]
}
return
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
for len(b) != 0 {
ea, nb, err := parseEa(b)
if err != nil {
return nil, err
}
eas = append(eas, ea)
b = nb
}
return
}
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
if int(uint8(len(ea.Name))) != len(ea.Name) {
return errEaNameTooLarge
}
if int(uint16(len(ea.Value))) != len(ea.Value) {
return errEaValueTooLarge
}
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
withPadding := (entrySize + 3) &^ 3
nextOffset := uint32(0)
if !last {
nextOffset = withPadding
}
info := fileFullEaInformation{
NextEntryOffset: nextOffset,
Flags: ea.Flags,
NameLength: uint8(len(ea.Name)),
ValueLength: uint16(len(ea.Value)),
}
err := binary.Write(buf, binary.LittleEndian, &info)
if err != nil {
return err
}
_, err = buf.Write([]byte(ea.Name))
if err != nil {
return err
}
err = buf.WriteByte(0)
if err != nil {
return err
}
_, err = buf.Write(ea.Value)
if err != nil {
return err
}
_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
if err != nil {
return err
}
return nil
}
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
var buf bytes.Buffer
for i := range eas {
last := false
if i == len(eas)-1 {
last = true
}
err := writeEa(&buf, &eas[i], last)
if err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}

89
vendor/github.com/Microsoft/go-winio/ea_test.go generated vendored Normal file
View File

@@ -0,0 +1,89 @@
package winio
import (
"io/ioutil"
"os"
"reflect"
"syscall"
"testing"
"unsafe"
)
var (
testEas = []ExtendedAttribute{
{Name: "foo", Value: []byte("bar")},
{Name: "fizz", Value: []byte("buzz")},
}
testEasEncoded = []byte{16, 0, 0, 0, 0, 3, 3, 0, 102, 111, 111, 0, 98, 97, 114, 0, 0, 0, 0, 0, 0, 4, 4, 0, 102, 105, 122, 122, 0, 98, 117, 122, 122, 0, 0, 0}
testEasNotPadded = testEasEncoded[0 : len(testEasEncoded)-3]
testEasTruncated = testEasEncoded[0:20]
)
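// testEasEncoded (above) spells out the FILE_FULL_EA_INFORMATION layout parsed
// by parseEa: for the first entry, bytes 0-3 hold NextEntryOffset (16), byte 4
// the flags, byte 5 the name length (3), bytes 6-7 the value length (3),
// followed by "foo", a NUL, "bar", and one byte of padding to a 4-byte
// boundary. The second entry has NextEntryOffset 0, marking it as the last.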
func Test_RoundTripEas(t *testing.T) {
b, err := EncodeExtendedAttributes(testEas)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(testEasEncoded, b) {
t.Fatalf("encoded mismatch %v %v", testEasEncoded, b)
}
eas, err := DecodeExtendedAttributes(b)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(testEas, eas) {
t.Fatalf("mismatch %+v %+v", testEas, eas)
}
}
func Test_EasDontNeedPaddingAtEnd(t *testing.T) {
eas, err := DecodeExtendedAttributes(testEasNotPadded)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(testEas, eas) {
t.Fatalf("mismatch %+v %+v", testEas, eas)
}
}
func Test_TruncatedEasFailCorrectly(t *testing.T) {
_, err := DecodeExtendedAttributes(testEasTruncated)
if err == nil {
t.Fatal("expected error")
}
}
func Test_NilEasEncodeAndDecodeAsNil(t *testing.T) {
b, err := EncodeExtendedAttributes(nil)
if err != nil {
t.Fatal(err)
}
if len(b) != 0 {
t.Fatal("expected empty")
}
eas, err := DecodeExtendedAttributes(nil)
if err != nil {
t.Fatal(err)
}
if len(eas) != 0 {
t.Fatal("expected empty")
}
}
// Test_SetFileEa makes sure that the test buffer is actually parsable by NtSetEaFile.
func Test_SetFileEa(t *testing.T) {
f, err := ioutil.TempFile("", "winio")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())
defer f.Close()
ntdll := syscall.MustLoadDLL("ntdll.dll")
ntSetEaFile := ntdll.MustFindProc("NtSetEaFile")
var iosb [2]uintptr
r, _, _ := ntSetEaFile.Call(f.Fd(), uintptr(unsafe.Pointer(&iosb[0])), uintptr(unsafe.Pointer(&testEasEncoded[0])), uintptr(len(testEasEncoded)))
if r != 0 {
t.Fatalf("NtSetEaFile failed with %08x", r)
}
}

307
vendor/github.com/Microsoft/go-winio/file.go generated vendored Normal file
View File

@@ -0,0 +1,307 @@
// +build windows
package winio
import (
"errors"
"io"
"runtime"
"sync"
"sync/atomic"
"syscall"
"time"
)
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
func (b *atomicBool) swap(new bool) bool {
var newInt int32
if new {
newInt = 1
}
return atomic.SwapInt32((*int32)(b), newInt) == 1
}
const (
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
)
var (
ErrFileClosed = errors.New("file has already been closed")
ErrTimeout = &timeoutError{}
)
type timeoutError struct{}
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
type timeoutChan chan struct{}
var ioInitOnce sync.Once
var ioCompletionPort syscall.Handle
// ioResult contains the result of an asynchronous IO operation
type ioResult struct {
bytes uint32
err error
}
// ioOperation represents an outstanding asynchronous Win32 IO
type ioOperation struct {
o syscall.Overlapped
ch chan ioResult
}
func initIo() {
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
if err != nil {
panic(err)
}
ioCompletionPort = h
go ioCompletionProcessor(h)
}
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
handle syscall.Handle
wg sync.WaitGroup
wgLock sync.RWMutex
closing atomicBool
readDeadline deadlineHandler
writeDeadline deadlineHandler
}
type deadlineHandler struct {
setLock sync.Mutex
channel timeoutChan
channelLock sync.RWMutex
timer *time.Timer
timedout atomicBool
}
// makeWin32File makes a new win32File from an existing file handle
func makeWin32File(h syscall.Handle) (*win32File, error) {
f := &win32File{handle: h}
ioInitOnce.Do(initIo)
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
if err != nil {
return nil, err
}
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
if err != nil {
return nil, err
}
f.readDeadline.channel = make(timeoutChan)
f.writeDeadline.channel = make(timeoutChan)
return f, nil
}
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return makeWin32File(h)
}
// closeHandle closes the resources associated with a Win32 handle
func (f *win32File) closeHandle() {
f.wgLock.Lock()
// Atomically set that we are closing, releasing the resources only once.
if !f.closing.swap(true) {
f.wgLock.Unlock()
// cancel all IO and wait for it to complete
cancelIoEx(f.handle, nil)
f.wg.Wait()
// at this point, no new IO can start
syscall.Close(f.handle)
f.handle = 0
} else {
f.wgLock.Unlock()
}
}
// Close closes a win32File.
func (f *win32File) Close() error {
f.closeHandle()
return nil
}
// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
f.wgLock.RLock()
if f.closing.isSet() {
f.wgLock.RUnlock()
return nil, ErrFileClosed
}
f.wg.Add(1)
f.wgLock.RUnlock()
c := &ioOperation{}
c.ch = make(chan ioResult)
return c, nil
}
// ioCompletionProcessor processes completed async IOs forever
func ioCompletionProcessor(h syscall.Handle) {
for {
var bytes uint32
var key uintptr
var op *ioOperation
err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
if op == nil {
panic(err)
}
op.ch <- ioResult{bytes, err}
}
}
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
if err != syscall.ERROR_IO_PENDING {
return int(bytes), err
}
if f.closing.isSet() {
cancelIoEx(f.handle, &c.o)
}
var timeout timeoutChan
if d != nil {
d.channelLock.Lock()
timeout = d.channel
d.channelLock.Unlock()
}
var r ioResult
select {
case r = <-c.ch:
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED {
if f.closing.isSet() {
err = ErrFileClosed
}
}
case <-timeout:
cancelIoEx(f.handle, &c.o)
r = <-c.ch
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED {
err = ErrTimeout
}
}
// runtime.KeepAlive is needed because c is passed via native
// code to ioCompletionProcessor; c must remain alive
// until the channel read is complete.
runtime.KeepAlive(c)
return int(r.bytes), err
}
// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
c, err := f.prepareIo()
if err != nil {
return 0, err
}
defer f.wg.Done()
if f.readDeadline.timedout.isSet() {
return 0, ErrTimeout
}
var bytes uint32
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
runtime.KeepAlive(b)
// Handle EOF conditions.
if err == nil && n == 0 && len(b) != 0 {
return 0, io.EOF
} else if err == syscall.ERROR_BROKEN_PIPE {
return 0, io.EOF
} else {
return n, err
}
}
// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
c, err := f.prepareIo()
if err != nil {
return 0, err
}
defer f.wg.Done()
if f.writeDeadline.timedout.isSet() {
return 0, ErrTimeout
}
var bytes uint32
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
runtime.KeepAlive(b)
return n, err
}
func (f *win32File) SetReadDeadline(deadline time.Time) error {
return f.readDeadline.set(deadline)
}
func (f *win32File) SetWriteDeadline(deadline time.Time) error {
return f.writeDeadline.set(deadline)
}
func (f *win32File) Flush() error {
return syscall.FlushFileBuffers(f.handle)
}
func (d *deadlineHandler) set(deadline time.Time) error {
d.setLock.Lock()
defer d.setLock.Unlock()
if d.timer != nil {
if !d.timer.Stop() {
<-d.channel
}
d.timer = nil
}
d.timedout.setFalse()
select {
case <-d.channel:
d.channelLock.Lock()
d.channel = make(chan struct{})
d.channelLock.Unlock()
default:
}
if deadline.IsZero() {
return nil
}
timeoutIO := func() {
d.timedout.setTrue()
close(d.channel)
}
now := time.Now()
duration := deadline.Sub(now)
if deadline.After(now) {
// Deadline is in the future, set a timer to wait
d.timer = time.AfterFunc(duration, timeoutIO)
} else {
// Deadline is in the past. Cancel all pending IO now.
timeoutIO()
}
return nil
}

61
vendor/github.com/Microsoft/go-winio/fileinfo.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
// +build windows
package winio
import (
"os"
"runtime"
"syscall"
"unsafe"
)
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
const (
fileBasicInfo = 0
fileIDInfo = 0x12
)
// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uint32
pad uint32 // padding
}
// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
bi := &FileBasicInfo{}
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
return bi, nil
}
// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
return nil
}
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
// unique on a system.
type FileIDInfo struct {
VolumeSerialNumber uint64
FileID [16]byte
}
// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
fileID := &FileIDInfo{}
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
return fileID, nil
}

View File

@@ -0,0 +1,15 @@
// Package etw provides support for TraceLogging-based ETW (Event Tracing
// for Windows). TraceLogging is a format of ETW events that are self-describing
// (the event contains information on its own schema). This allows them to be
// decoded without needing a separate manifest with event information. The
// implementation here is based on the information found in
// TraceLoggingProvider.h in the Windows SDK, which implements TraceLogging as a
// set of C macros.
package etw
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go etw.go
//sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister
//sys eventUnregister(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister
//sys eventWriteTransfer(providerHandle providerHandle, descriptor *EventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer
//sys eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation

View File

@@ -0,0 +1,65 @@
package etw
import (
"bytes"
"encoding/binary"
)
// EventData maintains a buffer which builds up the data for an ETW event. It
// needs to be paired with EventMetadata which describes the event.
type EventData struct {
buffer bytes.Buffer
}
// Bytes returns the raw binary data containing the event data. The returned
// value is not copied from the internal buffer, so it can be mutated by the
// EventData object after it is returned.
func (ed *EventData) Bytes() []byte {
return ed.buffer.Bytes()
}
// WriteString appends a string, including the null terminator, to the buffer.
func (ed *EventData) WriteString(data string) {
ed.buffer.WriteString(data)
ed.buffer.WriteByte(0)
}
// WriteInt8 appends an int8 to the buffer.
func (ed *EventData) WriteInt8(value int8) {
ed.buffer.WriteByte(uint8(value))
}
// WriteInt16 appends an int16 to the buffer.
func (ed *EventData) WriteInt16(value int16) {
binary.Write(&ed.buffer, binary.LittleEndian, value)
}
// WriteInt32 appends an int32 to the buffer.
func (ed *EventData) WriteInt32(value int32) {
binary.Write(&ed.buffer, binary.LittleEndian, value)
}
// WriteInt64 appends an int64 to the buffer.
func (ed *EventData) WriteInt64(value int64) {
binary.Write(&ed.buffer, binary.LittleEndian, value)
}
// WriteUint8 appends a uint8 to the buffer.
func (ed *EventData) WriteUint8(value uint8) {
ed.buffer.WriteByte(value)
}
// WriteUint16 appends a uint16 to the buffer.
func (ed *EventData) WriteUint16(value uint16) {
binary.Write(&ed.buffer, binary.LittleEndian, value)
}
// WriteUint32 appends a uint32 to the buffer.
func (ed *EventData) WriteUint32(value uint32) {
binary.Write(&ed.buffer, binary.LittleEndian, value)
}
// WriteUint64 appends a uint64 to the buffer.
func (ed *EventData) WriteUint64(value uint64) {
binary.Write(&ed.buffer, binary.LittleEndian, value)
}
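// As an illustration (not part of the original file), EventData is always
// paired with an EventMetadata that declares the same fields in the same
// order. A hypothetical event with a single uint32 field could be assembled
// as:
//
// var em EventMetadata
// var ed EventData
// em.WriteEventHeader("ContainerEvent", 0)
// em.WriteField("count", InTypeUint32, OutTypeDefault, 0)
// ed.WriteUint32(42)
//
// The provider code (not shown here) then wraps both buffers in data
// descriptors for eventWriteTransfer.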

View File

@@ -0,0 +1,29 @@
package etw
import (
"unsafe"
)
type eventDataDescriptorType uint8
const (
eventDataDescriptorTypeUserData eventDataDescriptorType = iota
eventDataDescriptorTypeEventMetadata
eventDataDescriptorTypeProviderMetadata
)
type eventDataDescriptor struct {
ptr ptr64
size uint32
dataType eventDataDescriptorType
reserved1 uint8
reserved2 uint16
}
func newEventDataDescriptor(dataType eventDataDescriptorType, buffer []byte) eventDataDescriptor {
return eventDataDescriptor{
ptr: ptr64{ptr: unsafe.Pointer(&buffer[0])},
size: uint32(len(buffer)),
dataType: dataType,
}
}

View File

@@ -0,0 +1,67 @@
package etw
// Channel represents the ETW logging channel that is used. It can be used by
// event consumers to give an event special treatment.
type Channel uint8
const (
// ChannelTraceLogging is the default channel for TraceLogging events. It is
// not required to be used for TraceLogging, but will prevent decoding
// issues for these events on older operating systems.
ChannelTraceLogging Channel = 11
)
// Level represents the ETW logging level. There are several predefined levels
// that are commonly used, but technically anything from 0-255 is allowed.
// Lower levels indicate more important events, and 0 indicates an event that
// will always be collected.
type Level uint8
// Predefined ETW log levels.
const (
LevelAlways Level = iota
LevelCritical
LevelError
LevelWarning
LevelInfo
LevelVerbose
)
// EventDescriptor represents various metadata for an ETW event.
type EventDescriptor struct {
id uint16
version uint8
Channel Channel
Level Level
Opcode uint8
Task uint16
Keyword uint64
}
// NewEventDescriptor returns an EventDescriptor initialized for use with
// TraceLogging.
func NewEventDescriptor() *EventDescriptor {
// Standard TraceLogging events default to the TraceLogging channel, and
// verbose level.
return &EventDescriptor{
Channel: ChannelTraceLogging,
Level: LevelVerbose,
}
}
// Identity returns the identity of the event. If the identity is not 0, it
// should uniquely identify the other event metadata (contained in
// EventDescriptor, and field metadata). Only the lower 24 bits of this value
// are relevant.
func (ed *EventDescriptor) Identity() uint32 {
return (uint32(ed.version) << 16) | uint32(ed.id)
}
// SetIdentity sets the identity of the event. If the identity is not 0, it
// should uniquely identify the other event metadata (contained in
// EventDescriptor, and field metadata). Only the lower 24 bits of this value
// are relevant.
func (ed *EventDescriptor) SetIdentity(identity uint32) {
ed.id = uint16(identity)
ed.version = uint8(identity >> 16)
}

View File

@@ -0,0 +1,177 @@
package etw
import (
"bytes"
"encoding/binary"
)
// InType indicates the type of data contained in the ETW event.
type InType byte
// Various InType definitions for TraceLogging. These must match the definitions
// found in TraceLoggingProvider.h in the Windows SDK.
const (
InTypeNull InType = iota
InTypeUnicodeString
InTypeANSIString
InTypeInt8
InTypeUint8
InTypeInt16
InTypeUint16
InTypeInt32
InTypeUint32
InTypeInt64
InTypeUint64
InTypeFloat
InTypeDouble
InTypeBool32
InTypeBinary
InTypeGUID
InTypePointerUnsupported
InTypeFileTime
InTypeSystemTime
InTypeSID
InTypeHexInt32
InTypeHexInt64
InTypeCountedString
InTypeCountedANSIString
InTypeStruct
InTypeCountedBinary
InTypeCountedArray InType = 32
InTypeArray InType = 64
)
// OutType specifies a hint to the event decoder for how the value should be
// formatted.
type OutType byte
// Various OutType definitions for TraceLogging. These must match the
// definitions found in TraceLoggingProvider.h in the Windows SDK.
const (
// OutTypeDefault indicates that the default formatting for the InType will
// be used by the event decoder.
OutTypeDefault OutType = iota
OutTypeNoPrint
OutTypeString
OutTypeBoolean
OutTypeHex
OutTypePID
OutTypeTID
OutTypePort
OutTypeIPv4
OutTypeIPv6
OutTypeSocketAddress
OutTypeXML
OutTypeJSON
OutTypeWin32Error
OutTypeNTStatus
OutTypeHResult
OutTypeFileTime
OutTypeSigned
OutTypeUnsigned
OutTypeUTF8 OutType = 35
OutTypePKCS7WithTypeInfo OutType = 36
OutTypeCodePointer OutType = 37
OutTypeDateTimeUTC OutType = 38
)
// EventMetadata maintains a buffer which builds up the metadata for an ETW
// event. It needs to be paired with EventData which describes the event.
type EventMetadata struct {
buffer bytes.Buffer
}
// Bytes returns the raw binary data containing the event metadata. Before being
// returned, the current size of the buffer is written to the start of the
// buffer. The returned value is not copied from the internal buffer, so it can
// be mutated by the EventMetadata object after it is returned.
func (em *EventMetadata) Bytes() []byte {
// Finalize the event metadata buffer by filling in the buffer length at the
// beginning.
binary.LittleEndian.PutUint16(em.buffer.Bytes(), uint16(em.buffer.Len()))
return em.buffer.Bytes()
}
// WriteEventHeader writes the metadata for the start of an event to the buffer.
// This specifies the event name and tags.
func (em *EventMetadata) WriteEventHeader(name string, tags uint32) {
binary.Write(&em.buffer, binary.LittleEndian, uint16(0)) // Length placeholder
em.writeTags(tags)
em.buffer.WriteString(name)
em.buffer.WriteByte(0) // Null terminator for name
}
func (em *EventMetadata) writeField(name string, inType InType, outType OutType, tags uint32, arrSize uint16) {
em.buffer.WriteString(name)
em.buffer.WriteByte(0) // Null terminator for name
if outType == OutTypeDefault && tags == 0 {
em.buffer.WriteByte(byte(inType))
} else {
em.buffer.WriteByte(byte(inType | 128))
if tags == 0 {
em.buffer.WriteByte(byte(outType))
} else {
em.buffer.WriteByte(byte(outType | 128))
em.writeTags(tags)
}
}
if arrSize != 0 {
binary.Write(&em.buffer, binary.LittleEndian, arrSize)
}
}
// writeTags writes out the tags value to the event metadata. Tags is a 28-bit
// value, interpreted as bit flags, which are only relevant to the event
// consumer. The event consumer may choose to attribute special meaning to tags
// (e.g. 0x4 could mean the field contains PII). Tags are written as a series of
// bytes, each containing 7 bits of tag value, with the high bit set if there is
// more tag data in the following byte. This allows for a more compact
// representation when not all of the tag bits are needed.
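// As an illustration: tags = 0x4 encodes as 0x80 0x80 0x80 0x04 (the low bits
// are emitted last, so three continuation bytes are needed), while a value
// using only the high bits, such as 0x8000000, encodes as the single byte
// 0x40.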
func (em *EventMetadata) writeTags(tags uint32) {
// Only use the top 28 bits of the tags value.
tags &= 0xfffffff
for {
// Tags are written with the most significant bits (e.g. 21-27) first.
val := tags >> 21
if tags&0x1fffff == 0 {
// If there is no more data to write after this, write this value
// without the high bit set, and return.
em.buffer.WriteByte(byte(val & 0x7f))
return
}
em.buffer.WriteByte(byte(val | 0x80))
tags <<= 7
}
}
// WriteField writes the metadata for a simple field to the buffer.
func (em *EventMetadata) WriteField(name string, inType InType, outType OutType, tags uint32) {
em.writeField(name, inType, outType, tags, 0)
}
// WriteArray writes the metadata for an array field to the buffer. The number
// of elements in the array must be written as a uint16 in the event data,
// immediately preceding the array elements.
func (em *EventMetadata) WriteArray(name string, inType InType, outType OutType, tags uint32) {
em.writeField(name, inType|InTypeArray, outType, tags, 0)
}
// WriteCountedArray writes the metadata for an array field to the buffer. The
// size of a counted array is fixed, and the size is written into the metadata
// directly.
func (em *EventMetadata) WriteCountedArray(name string, count uint16, inType InType, outType OutType, tags uint32) {
em.writeField(name, inType|InTypeCountedArray, outType, tags, count)
}
// WriteStruct writes the metadata for a nested struct to the buffer. The struct
// contains the next N fields in the metadata, where N is specified by the
// fieldCount argument.
func (em *EventMetadata) WriteStruct(name string, fieldCount uint8, tags uint32) {
em.writeField(name, InTypeStruct, OutType(fieldCount), tags, 0)
}

View File

@@ -0,0 +1,63 @@
package etw
import (
"golang.org/x/sys/windows"
)
type eventOptions struct {
descriptor *EventDescriptor
activityID *windows.GUID
relatedActivityID *windows.GUID
tags uint32
}
// EventOpt defines the option function type that can be passed to
// Provider.WriteEvent to specify general event options, such as level and
// keyword.
type EventOpt func(options *eventOptions)
// WithEventOpts returns the variadic arguments as a single slice.
func WithEventOpts(opts ...EventOpt) []EventOpt {
return opts
}
// WithLevel specifies the level of the event to be written.
func WithLevel(level Level) EventOpt {
return func(options *eventOptions) {
options.descriptor.Level = level
}
}
// WithKeyword specifies the keywords of the event to be written. Multiple uses
// of this option are OR'd together.
func WithKeyword(keyword uint64) EventOpt {
return func(options *eventOptions) {
options.descriptor.Keyword |= keyword
}
}
func WithChannel(channel Channel) EventOpt {
return func(options *eventOptions) {
options.descriptor.Channel = channel
}
}
// WithTags specifies the tags of the event to be written. Tags is a 28-bit
// value (the top 4 bits are ignored) that is interpreted by the event consumer.
func WithTags(newTags uint32) EventOpt {
return func(options *eventOptions) {
options.tags |= newTags
}
}
func WithActivityID(activityID *windows.GUID) EventOpt {
return func(options *eventOptions) {
options.activityID = activityID
}
}
func WithRelatedActivityID(activityID *windows.GUID) EventOpt {
return func(options *eventOptions) {
options.relatedActivityID = activityID
}
}

View File

@@ -0,0 +1,379 @@
package etw
import (
"math"
"unsafe"
)
// FieldOpt defines the option function type that can be passed to
// Provider.WriteEvent to add fields to the event.
type FieldOpt func(em *EventMetadata, ed *EventData)
// WithFields returns the variadic arguments as a single slice.
func WithFields(opts ...FieldOpt) []FieldOpt {
return opts
}
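// hexUint32Field is an illustrative sketch, not part of the original file: it
// shows that a FieldOpt is just a paired metadata write and data write, so new
// helpers can be composed from the same primitives used below. This hypothetical
// option renders a uint32 in hexadecimal in the decoded event, mirroring how
// UintptrField uses the hex in-types further down.
func hexUint32Field(name string, value uint32) FieldOpt {
	return func(em *EventMetadata, ed *EventData) {
		em.WriteField(name, InTypeHexInt32, OutTypeDefault, 0)
		ed.WriteUint32(value)
	}
}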
// BoolField adds a single bool field to the event.
func BoolField(name string, value bool) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeUint8, OutTypeBoolean, 0)
bool8 := uint8(0)
if value {
bool8 = uint8(1)
}
ed.WriteUint8(bool8)
}
}
// BoolArray adds an array of bool to the event.
func BoolArray(name string, values []bool) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeUint8, OutTypeBoolean, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
bool8 := uint8(0)
if v {
bool8 = uint8(1)
}
ed.WriteUint8(bool8)
}
}
}
// StringField adds a single string field to the event.
func StringField(name string, value string) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeANSIString, OutTypeUTF8, 0)
ed.WriteString(value)
}
}
// StringArray adds an array of string to the event.
func StringArray(name string, values []string) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeANSIString, OutTypeUTF8, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteString(v)
}
}
}
// IntField adds a single int field to the event.
func IntField(name string, value int) FieldOpt {
switch unsafe.Sizeof(value) {
case 4:
return Int32Field(name, int32(value))
case 8:
return Int64Field(name, int64(value))
default:
panic("Unsupported int size")
}
}
// IntArray adds an array of int to the event.
func IntArray(name string, values []int) FieldOpt {
inType := InTypeNull
var writeItem func(*EventData, int)
switch unsafe.Sizeof(values[0]) {
case 4:
inType = InTypeInt32
writeItem = func(ed *EventData, item int) { ed.WriteInt32(int32(item)) }
case 8:
inType = InTypeInt64
writeItem = func(ed *EventData, item int) { ed.WriteInt64(int64(item)) }
default:
panic("Unsupported int size")
}
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, inType, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
writeItem(ed, v)
}
}
}
// Int8Field adds a single int8 field to the event.
func Int8Field(name string, value int8) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeInt8, OutTypeDefault, 0)
ed.WriteInt8(value)
}
}
// Int8Array adds an array of int8 to the event.
func Int8Array(name string, values []int8) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeInt8, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteInt8(v)
}
}
}
// Int16Field adds a single int16 field to the event.
func Int16Field(name string, value int16) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeInt16, OutTypeDefault, 0)
ed.WriteInt16(value)
}
}
// Int16Array adds an array of int16 to the event.
func Int16Array(name string, values []int16) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeInt16, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteInt16(v)
}
}
}
// Int32Field adds a single int32 field to the event.
func Int32Field(name string, value int32) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeInt32, OutTypeDefault, 0)
ed.WriteInt32(value)
}
}
// Int32Array adds an array of int32 to the event.
func Int32Array(name string, values []int32) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeInt32, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteInt32(v)
}
}
}
// Int64Field adds a single int64 field to the event.
func Int64Field(name string, value int64) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeInt64, OutTypeDefault, 0)
ed.WriteInt64(value)
}
}
// Int64Array adds an array of int64 to the event.
func Int64Array(name string, values []int64) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeInt64, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteInt64(v)
}
}
}
// UintField adds a single uint field to the event.
func UintField(name string, value uint) FieldOpt {
switch unsafe.Sizeof(value) {
case 4:
return Uint32Field(name, uint32(value))
case 8:
return Uint64Field(name, uint64(value))
default:
panic("Unsupported uint size")
}
}
// UintArray adds an array of uint to the event.
func UintArray(name string, values []uint) FieldOpt {
inType := InTypeNull
var writeItem func(*EventData, uint)
switch unsafe.Sizeof(values[0]) {
case 4:
inType = InTypeUint32
writeItem = func(ed *EventData, item uint) { ed.WriteUint32(uint32(item)) }
case 8:
inType = InTypeUint64
writeItem = func(ed *EventData, item uint) { ed.WriteUint64(uint64(item)) }
default:
panic("Unsupported uint size")
}
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, inType, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
writeItem(ed, v)
}
}
}
// Uint8Field adds a single uint8 field to the event.
func Uint8Field(name string, value uint8) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeUint8, OutTypeDefault, 0)
ed.WriteUint8(value)
}
}
// Uint8Array adds an array of uint8 to the event.
func Uint8Array(name string, values []uint8) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeUint8, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteUint8(v)
}
}
}
// Uint16Field adds a single uint16 field to the event.
func Uint16Field(name string, value uint16) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeUint16, OutTypeDefault, 0)
ed.WriteUint16(value)
}
}
// Uint16Array adds an array of uint16 to the event.
func Uint16Array(name string, values []uint16) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeUint16, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteUint16(v)
}
}
}
// Uint32Field adds a single uint32 field to the event.
func Uint32Field(name string, value uint32) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeUint32, OutTypeDefault, 0)
ed.WriteUint32(value)
}
}
// Uint32Array adds an array of uint32 to the event.
func Uint32Array(name string, values []uint32) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeUint32, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteUint32(v)
}
}
}
// Uint64Field adds a single uint64 field to the event.
func Uint64Field(name string, value uint64) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeUint64, OutTypeDefault, 0)
ed.WriteUint64(value)
}
}
// Uint64Array adds an array of uint64 to the event.
func Uint64Array(name string, values []uint64) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeUint64, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteUint64(v)
}
}
}
// UintptrField adds a single uintptr field to the event.
func UintptrField(name string, value uintptr) FieldOpt {
inType := InTypeNull
var writeItem func(*EventData, uintptr)
switch unsafe.Sizeof(value) {
case 4:
inType = InTypeHexInt32
writeItem = func(ed *EventData, item uintptr) { ed.WriteUint32(uint32(item)) }
case 8:
inType = InTypeHexInt64
writeItem = func(ed *EventData, item uintptr) { ed.WriteUint64(uint64(item)) }
default:
panic("Unsupported uintptr size")
}
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, inType, OutTypeDefault, 0)
writeItem(ed, value)
}
}
// UintptrArray adds an array of uintptr to the event.
func UintptrArray(name string, values []uintptr) FieldOpt {
inType := InTypeNull
var writeItem func(*EventData, uintptr)
switch unsafe.Sizeof(values[0]) {
case 4:
inType = InTypeHexInt32
writeItem = func(ed *EventData, item uintptr) { ed.WriteUint32(uint32(item)) }
case 8:
inType = InTypeHexInt64
writeItem = func(ed *EventData, item uintptr) { ed.WriteUint64(uint64(item)) }
default:
panic("Unsupported uintptr size")
}
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, inType, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
writeItem(ed, v)
}
}
}
// Float32Field adds a single float32 field to the event.
func Float32Field(name string, value float32) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeFloat, OutTypeDefault, 0)
ed.WriteUint32(math.Float32bits(value))
}
}
// Float32Array adds an array of float32 to the event.
func Float32Array(name string, values []float32) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeFloat, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteUint32(math.Float32bits(v))
}
}
}
// Float64Field adds a single float64 field to the event.
func Float64Field(name string, value float64) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteField(name, InTypeDouble, OutTypeDefault, 0)
ed.WriteUint64(math.Float64bits(value))
}
}
// Float64Array adds an array of float64 to the event.
func Float64Array(name string, values []float64) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteArray(name, InTypeDouble, OutTypeDefault, 0)
ed.WriteUint16(uint16(len(values)))
for _, v := range values {
ed.WriteUint64(math.Float64bits(v))
}
}
}
// Struct adds a nested struct to the event; the FieldOpts in the opts argument
// specify the fields of the struct.
func Struct(name string, opts ...FieldOpt) FieldOpt {
return func(em *EventMetadata, ed *EventData) {
em.WriteStruct(name, uint8(len(opts)), 0)
for _, opt := range opts {
opt(em, ed)
}
}
}

View File

@@ -0,0 +1,279 @@
package etw
import (
"bytes"
"crypto/sha1"
"encoding/binary"
"encoding/hex"
"fmt"
"strings"
"unicode/utf16"
"unsafe"
"golang.org/x/sys/windows"
)
// Provider represents an ETW event provider. It is identified by a provider
// name and ID (GUID), which should always have a 1:1 mapping to each other
// (e.g. don't use multiple provider names with the same ID, or vice versa).
type Provider struct {
ID *windows.GUID
handle providerHandle
metadata []byte
callback EnableCallback
index uint
enabled bool
level Level
keywordAny uint64
keywordAll uint64
}
// String returns the `provider`.ID as a string
func (provider *Provider) String() string {
data1 := make([]byte, 4)
binary.BigEndian.PutUint32(data1, provider.ID.Data1)
data2 := make([]byte, 2)
binary.BigEndian.PutUint16(data2, provider.ID.Data2)
data3 := make([]byte, 2)
binary.BigEndian.PutUint16(data3, provider.ID.Data3)
return fmt.Sprintf(
"%s-%s-%s-%s-%s",
hex.EncodeToString(data1),
hex.EncodeToString(data2),
hex.EncodeToString(data3),
hex.EncodeToString(provider.ID.Data4[:2]),
hex.EncodeToString(provider.ID.Data4[2:]))
}
type providerHandle windows.Handle
// ProviderState informs the provider EnableCallback what action is being
// performed.
type ProviderState uint32
const (
// ProviderStateDisable indicates the provider is being disabled.
ProviderStateDisable ProviderState = iota
// ProviderStateEnable indicates the provider is being enabled.
ProviderStateEnable
// ProviderStateCaptureState indicates the provider is having its current
// state snap-shotted.
ProviderStateCaptureState
)
type eventInfoClass uint32
const (
eventInfoClassProviderBinaryTrackInfo eventInfoClass = iota
eventInfoClassProviderSetReserved1
eventInfoClassProviderSetTraits
eventInfoClassProviderUseDescriptorType
)
// EnableCallback is the form of the callback function that receives provider
// enable/disable notifications from ETW.
type EnableCallback func(*windows.GUID, ProviderState, Level, uint64, uint64, uintptr)
func providerCallback(sourceID *windows.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) {
provider := providers.getProvider(uint(i))
switch state {
case ProviderStateDisable:
provider.enabled = false
case ProviderStateEnable:
provider.enabled = true
provider.level = level
provider.keywordAny = matchAnyKeyword
provider.keywordAll = matchAllKeyword
}
if provider.callback != nil {
provider.callback(sourceID, state, level, matchAnyKeyword, matchAllKeyword, filterData)
}
}
// providerCallbackAdapter acts as the first-level callback from the C/ETW side
// for provider notifications. Because Go has trouble with callback arguments of
// different sizes, the adapter takes only pointer-sized arguments, which are then
// cast to the appropriate types when calling providerCallback.
func providerCallbackAdapter(sourceID *windows.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr {
providerCallback(sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i)
return 0
}
// providerIDFromName generates a provider ID based on the provider name. It
// uses the same algorithm as used by .NET's EventSource class, which is based
// on RFC 4122. More information on the algorithm can be found here:
// https://blogs.msdn.microsoft.com/dcook/2015/09/08/etw-provider-names-and-guids/
// The algorithm is roughly:
// Hash = Sha1(namespace + arg.ToUpper().ToUtf16be())
// Guid = Hash[0..15], with Hash[7] tweaked according to RFC 4122
func providerIDFromName(name string) *windows.GUID {
buffer := sha1.New()
namespace := []byte{0x48, 0x2C, 0x2D, 0xB2, 0xC3, 0x90, 0x47, 0xC8, 0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}
buffer.Write(namespace)
binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name))))
sum := buffer.Sum(nil)
sum[7] = (sum[7] & 0xf) | 0x50
return &windows.GUID{
Data1: binary.LittleEndian.Uint32(sum[0:4]),
Data2: binary.LittleEndian.Uint16(sum[4:6]),
Data3: binary.LittleEndian.Uint16(sum[6:8]),
Data4: [8]byte{sum[8], sum[9], sum[10], sum[11], sum[12], sum[13], sum[14], sum[15]},
}
}
// NewProvider creates and registers a new ETW provider. The provider ID is
// generated based on the provider name.
func NewProvider(name string, callback EnableCallback) (provider *Provider, err error) {
return NewProviderWithID(name, providerIDFromName(name), callback)
}
// NewProviderWithID creates and registers a new ETW provider, allowing the
// provider ID to be manually specified. This is most useful when there is an
// existing provider ID that must be used to conform to existing diagnostic
// infrastructure.
func NewProviderWithID(name string, id *windows.GUID, callback EnableCallback) (provider *Provider, err error) {
providerCallbackOnce.Do(func() {
globalProviderCallback = windows.NewCallback(providerCallbackAdapter)
})
provider = providers.newProvider()
defer func() {
if err != nil {
providers.removeProvider(provider)
}
}()
provider.ID = id
provider.callback = callback
if err := eventRegister(provider.ID, globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil {
return nil, err
}
metadata := &bytes.Buffer{}
binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later)
metadata.WriteString(name)
metadata.WriteByte(0) // Null terminator for name
binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer
provider.metadata = metadata.Bytes()
if err := eventSetInformation(
provider.handle,
eventInfoClassProviderSetTraits,
uintptr(unsafe.Pointer(&provider.metadata[0])),
uint32(len(provider.metadata))); err != nil {
return nil, err
}
return provider, nil
}
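// exampleNewProviderWithFixedID is an illustrative sketch, not part of the
// original file: it registers a provider under a pre-existing GUID instead of
// deriving one from the name. The provider name and GUID value here are made up
// purely for illustration.
func exampleNewProviderWithFixedID() (*Provider, error) {
	id := &windows.GUID{
		Data1: 0x5e50de03,
		Data2: 0x107c,
		Data3: 0x5a83,
		Data4: [8]byte{0x74, 0xc6, 0x99, 0x8c, 0x44, 0x91, 0xe7, 0xe9},
	}
	return NewProviderWithID("ExampleProvider", id, nil)
}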
// Close unregisters the provider.
func (provider *Provider) Close() error {
providers.removeProvider(provider)
return eventUnregister(provider.handle)
}
// IsEnabled calls IsEnabledForLevelAndKeywords with LevelAlways and all
// keywords set.
func (provider *Provider) IsEnabled() bool {
return provider.IsEnabledForLevelAndKeywords(LevelAlways, ^uint64(0))
}
// IsEnabledForLevel calls IsEnabledForLevelAndKeywords with the specified level
// and all keywords set.
func (provider *Provider) IsEnabledForLevel(level Level) bool {
return provider.IsEnabledForLevelAndKeywords(level, ^uint64(0))
}
// IsEnabledForLevelAndKeywords allows event producer code to check if there are
// any event sessions that are interested in an event, based on the event level
// and keywords. Although this check happens automatically in the ETW
// infrastructure, it can be useful to check if an event will actually be
// consumed before doing expensive work to build the event data.
func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uint64) bool {
if !provider.enabled {
return false
}
// ETW automatically sets the level to 255 if it is specified as 0, so we
// don't need to worry about the level=0 (all events) case.
if level > provider.level {
return false
}
if keywords != 0 && (keywords&provider.keywordAny == 0 || keywords&provider.keywordAll != provider.keywordAll) {
return false
}
return true
}
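// exampleGuardedWrite is an illustrative sketch, not part of the original file:
// it guards expensive field construction behind the enablement check described
// above. buildExpensiveFields is a hypothetical callback standing in for costly
// work the caller only wants to do when a session is actually listening.
func exampleGuardedWrite(p *Provider, buildExpensiveFields func() []FieldOpt) error {
	const keyword = 0x10 // hypothetical keyword bit for these events
	if !p.IsEnabledForLevelAndKeywords(LevelInfo, keyword) {
		return nil // no session wants this event; skip building it
	}
	return p.WriteEvent("ExpensiveEvent",
		WithEventOpts(WithLevel(LevelInfo), WithKeyword(keyword)),
		buildExpensiveFields())
}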
// WriteEvent writes a single ETW event from the provider. The event is
// constructed based on the EventOpt and FieldOpt values that are passed as
// opts.
func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpts []FieldOpt) error {
options := eventOptions{descriptor: NewEventDescriptor()}
em := &EventMetadata{}
ed := &EventData{}
// We need to evaluate the EventOpts first since they might change tags, and
// we write out the tags before evaluating FieldOpts.
for _, opt := range eventOpts {
opt(&options)
}
if !provider.IsEnabledForLevelAndKeywords(options.descriptor.Level, options.descriptor.Keyword) {
return nil
}
em.WriteEventHeader(name, options.tags)
for _, opt := range fieldOpts {
opt(em, ed)
}
// Don't pass a data blob if there is no event data. There will always be
// event metadata (e.g. for the name) so we don't need to do this check for
// the metadata.
dataBlobs := [][]byte{}
if len(ed.Bytes()) > 0 {
dataBlobs = [][]byte{ed.Bytes()}
}
return provider.WriteEventRaw(options.descriptor, nil, nil, [][]byte{em.Bytes()}, dataBlobs)
}
// WriteEventRaw writes a single ETW event from the provider. This function is
// less abstracted than WriteEvent, and presents a fairly direct interface to
// the event writing functionality. It expects a series of event metadata and
// event data blobs to be passed in, which must conform to the TraceLogging
// schema. The functions on EventMetadata and EventData can help with creating
// these blobs. The blobs of each type are effectively concatenated together by
// the ETW infrastructure.
func (provider *Provider) WriteEventRaw(
descriptor *EventDescriptor,
activityID *windows.GUID,
relatedActivityID *windows.GUID,
metadataBlobs [][]byte,
dataBlobs [][]byte) error {
dataDescriptorCount := uint32(1 + len(metadataBlobs) + len(dataBlobs))
dataDescriptors := make([]eventDataDescriptor, 0, dataDescriptorCount)
dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeProviderMetadata, provider.metadata))
for _, blob := range metadataBlobs {
dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeEventMetadata, blob))
}
for _, blob := range dataBlobs {
dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob))
}
return eventWriteTransfer(provider.handle, descriptor, activityID, relatedActivityID, dataDescriptorCount, &dataDescriptors[0])
}

View File

@@ -0,0 +1,52 @@
package etw
import (
"sync"
)
// Because the provider callback function needs to be able to access the
// provider data when it is invoked by ETW, we need to keep provider data stored
// in a global map based on an index. The index is passed as the callback
// context to ETW.
type providerMap struct {
m map[uint]*Provider
i uint
lock sync.Mutex
once sync.Once
}
var providers = providerMap{
m: make(map[uint]*Provider),
}
func (p *providerMap) newProvider() *Provider {
p.lock.Lock()
defer p.lock.Unlock()
i := p.i
p.i++
provider := &Provider{
index: i,
}
p.m[i] = provider
return provider
}
func (p *providerMap) removeProvider(provider *Provider) {
p.lock.Lock()
defer p.lock.Unlock()
delete(p.m, provider.index)
}
func (p *providerMap) getProvider(index uint) *Provider {
p.lock.Lock()
defer p.lock.Unlock()
return p.m[index]
}
var providerCallbackOnce sync.Once
var globalProviderCallback uintptr

View File

@@ -0,0 +1,16 @@
// +build 386 arm
package etw
import (
"unsafe"
)
// ptr64 defines a struct containing a pointer. The struct is guaranteed to
// be 64 bits, regardless of the actual size of a pointer on the platform. This
// is intended for use with certain Windows APIs that expect a pointer as a
// ULONGLONG.
type ptr64 struct {
ptr unsafe.Pointer
_ uint32
}

View File

@@ -0,0 +1,15 @@
// +build amd64 arm64
package etw
import (
"unsafe"
)
// ptr64 defines a struct containing a pointer. The struct is guaranteed to
// be 64 bits, regardless of the actual size of a pointer on the platform. This
// is intended for use with certain Windows APIs that expect a pointer as a
// ULONGLONG.
type ptr64 struct {
ptr unsafe.Pointer
}

View File

@@ -0,0 +1,91 @@
// Shows a sample usage of the ETW logging package.
package main
import (
"bufio"
"fmt"
"os"
"github.com/Microsoft/go-winio/internal/etw"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
)
func callback(sourceID *windows.GUID, state etw.ProviderState, level etw.Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr) {
fmt.Printf("Callback: isEnabled=%d, level=%d, matchAnyKeyword=%d\n", state, level, matchAnyKeyword)
}
func main() {
provider, err := etw.NewProvider("TestProvider", callback)
if err != nil {
logrus.Error(err)
return
}
defer func() {
if err := provider.Close(); err != nil {
logrus.Error(err)
}
}()
fmt.Printf("Provider ID: %s\n", provider)
reader := bufio.NewReader(os.Stdin)
fmt.Println("Press enter to log events")
reader.ReadString('\n')
// Write using high-level API.
if err := provider.WriteEvent(
"TestEvent",
etw.WithEventOpts(
etw.WithLevel(etw.LevelInfo),
etw.WithKeyword(0x140),
),
etw.WithFields(
etw.StringField("TestField", "Foo"),
etw.StringField("TestField2", "Bar"),
etw.Struct("TestStruct",
etw.StringField("Field1", "Value1"),
etw.StringField("Field2", "Value2")),
etw.StringArray("TestArray", []string{
"Item1",
"Item2",
"Item3",
"Item4",
"Item5",
})),
); err != nil {
logrus.Error(err)
return
}
// Write using low-level API.
descriptor := etw.NewEventDescriptor()
descriptor.Level = etw.LevelInfo
descriptor.Keyword = 0x140
em := &etw.EventMetadata{}
ed := &etw.EventData{}
em.WriteEventHeader("TestEvent", 0)
em.WriteField("TestField", etw.InTypeANSIString, etw.OutTypeUTF8, 0)
ed.WriteString("Foo")
em.WriteField("TestField2", etw.InTypeANSIString, etw.OutTypeUTF8, 0)
ed.WriteString("Bar")
em.WriteStruct("TestStruct", 2, 0)
em.WriteField("Field1", etw.InTypeANSIString, etw.OutTypeUTF8, 0)
ed.WriteString("Value1")
em.WriteField("Field2", etw.InTypeANSIString, etw.OutTypeUTF8, 0)
ed.WriteString("Value2")
em.WriteArray("TestArray", etw.InTypeANSIString, etw.OutTypeDefault, 0)
ed.WriteUint16(5)
ed.WriteString("Item1")
ed.WriteString("Item2")
ed.WriteString("Item3")
ed.WriteString("Item4")
ed.WriteString("Item5")
if err := provider.WriteEventRaw(descriptor, nil, nil, [][]byte{em.Bytes()}, [][]byte{ed.Bytes()}); err != nil {
logrus.Error(err)
return
}
}

View File

@@ -0,0 +1,78 @@
// Code generated by 'go generate'; DO NOT EDIT.
package etw
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procEventRegister = modadvapi32.NewProc("EventRegister")
procEventUnregister = modadvapi32.NewProc("EventUnregister")
procEventWriteTransfer = modadvapi32.NewProc("EventWriteTransfer")
procEventSetInformation = modadvapi32.NewProc("EventSetInformation")
)
func eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) {
r0, _, _ := syscall.Syscall6(procEventRegister.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(callback), uintptr(callbackContext), uintptr(unsafe.Pointer(providerHandle)), 0, 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func eventUnregister(providerHandle providerHandle) (win32err error) {
r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 1, uintptr(providerHandle), 0, 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func eventWriteTransfer(providerHandle providerHandle, descriptor *EventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) {
r0, _, _ := syscall.Syscall6(procEventWriteTransfer.Addr(), 6, uintptr(providerHandle), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors)))
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) {
r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 4, uintptr(providerHandle), uintptr(class), uintptr(information), uintptr(length), 0, 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}

421
vendor/github.com/Microsoft/go-winio/pipe.go generated vendored Normal file
View File

@@ -0,0 +1,421 @@
// +build windows
package winio
import (
"errors"
"io"
"net"
"os"
"syscall"
"time"
"unsafe"
)
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
const (
cERROR_PIPE_BUSY = syscall.Errno(231)
cERROR_NO_DATA = syscall.Errno(232)
cERROR_PIPE_CONNECTED = syscall.Errno(535)
cERROR_SEM_TIMEOUT = syscall.Errno(121)
cPIPE_ACCESS_DUPLEX = 0x3
cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
cPIPE_REJECT_REMOTE_CLIENTS = 0x8
cPIPE_UNLIMITED_INSTANCES = 255
cNMPWAIT_USE_DEFAULT_WAIT = 0
cNMPWAIT_NOWAIT = 1
cPIPE_TYPE_MESSAGE = 4
cPIPE_READMODE_MESSAGE = 2
)
var (
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
// This error should match net.errClosing since docker takes a dependency on its text.
ErrPipeListenerClosed = errors.New("use of closed network connection")
errPipeWriteClosed = errors.New("pipe has been closed for write")
)
type win32Pipe struct {
*win32File
path string
}
type win32MessageBytePipe struct {
win32Pipe
writeClosed bool
readEOF bool
}
type pipeAddress string
func (f *win32Pipe) LocalAddr() net.Addr {
return pipeAddress(f.path)
}
func (f *win32Pipe) RemoteAddr() net.Addr {
return pipeAddress(f.path)
}
func (f *win32Pipe) SetDeadline(t time.Time) error {
f.SetReadDeadline(t)
f.SetWriteDeadline(t)
return nil
}
// CloseWrite closes the write side of a message pipe in byte mode.
func (f *win32MessageBytePipe) CloseWrite() error {
if f.writeClosed {
return errPipeWriteClosed
}
err := f.win32File.Flush()
if err != nil {
return err
}
_, err = f.win32File.Write(nil)
if err != nil {
return err
}
f.writeClosed = true
return nil
}
// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
// they are used to implement CloseWrite().
func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
if f.writeClosed {
return 0, errPipeWriteClosed
}
if len(b) == 0 {
return 0, nil
}
return f.win32File.Write(b)
}
// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
// mode pipe will return io.EOF, as will all subsequent reads.
func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
if f.readEOF {
return 0, io.EOF
}
n, err := f.win32File.Read(b)
if err == io.EOF {
// If this was the result of a zero-byte read, then
// it is possible that the read was due to a zero-size
// message. Since we are simulating CloseWrite with a
// zero-byte message, ensure that all future Read() calls
// also return EOF.
f.readEOF = true
} else if err == syscall.ERROR_MORE_DATA {
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
// and the message still has more bytes. Treat this as a success, since
// this package presents all named pipes as byte streams.
err = nil
}
return n, err
}
func (s pipeAddress) Network() string {
return "pipe"
}
func (s pipeAddress) String() string {
return string(s)
}
// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
var absTimeout time.Time
if timeout != nil {
absTimeout = time.Now().Add(*timeout)
} else {
absTimeout = time.Now().Add(time.Second * 2)
}
var err error
var h syscall.Handle
for {
h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != cERROR_PIPE_BUSY {
break
}
if time.Now().After(absTimeout) {
return nil, ErrTimeout
}
// Wait 10 msec and try again. This is a rather simplistic
// approach, as we always retry every 10 milliseconds.
time.Sleep(time.Millisecond * 10)
}
if err != nil {
return nil, &os.PathError{Op: "open", Path: path, Err: err}
}
var flags uint32
err = getNamedPipeInfo(h, &flags, nil, nil, nil)
if err != nil {
return nil, err
}
f, err := makeWin32File(h)
if err != nil {
syscall.Close(h)
return nil, err
}
// If the pipe is in message mode, return a message byte pipe, which
// supports CloseWrite().
if flags&cPIPE_TYPE_MESSAGE != 0 {
return &win32MessageBytePipe{
win32Pipe: win32Pipe{win32File: f, path: path},
}, nil
}
return &win32Pipe{win32File: f, path: path}, nil
}
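// exampleDialPipe is an illustrative sketch, not part of the original file: it
// dials a named pipe with an explicit timeout and uses the returned net.Conn.
// The pipe name is made up for illustration.
func exampleDialPipe() error {
	timeout := 2 * time.Second
	conn, err := DialPipe(`\\.\pipe\examplepipe`, &timeout)
	if err != nil {
		return err // includes ErrTimeout if the pipe stayed busy too long
	}
	defer conn.Close()
	_, err = conn.Write([]byte("hello\n"))
	return err
}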
type acceptResponse struct {
f *win32File
err error
}
type win32PipeListener struct {
firstHandle syscall.Handle
path string
securityDescriptor []byte
config PipeConfig
acceptCh chan (chan acceptResponse)
closeCh chan int
doneCh chan int
}
func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
if first {
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
}
var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
if c.MessageMode {
mode |= cPIPE_TYPE_MESSAGE
}
sa := &syscall.SecurityAttributes{}
sa.Length = uint32(unsafe.Sizeof(*sa))
if securityDescriptor != nil {
len := uint32(len(securityDescriptor))
sa.SecurityDescriptor = localAlloc(0, len)
defer localFree(sa.SecurityDescriptor)
copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
}
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
return h, nil
}
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
if err != nil {
return nil, err
}
f, err := makeWin32File(h)
if err != nil {
syscall.Close(h)
return nil, err
}
return f, nil
}
func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
p, err := l.makeServerPipe()
if err != nil {
return nil, err
}
// Wait for the client to connect.
ch := make(chan error)
go func(p *win32File) {
ch <- connectPipe(p)
}(p)
select {
case err = <-ch:
if err != nil {
p.Close()
p = nil
}
case <-l.closeCh:
// Abort the connect request by closing the handle.
p.Close()
p = nil
err = <-ch
if err == nil || err == ErrFileClosed {
err = ErrPipeListenerClosed
}
}
return p, err
}
func (l *win32PipeListener) listenerRoutine() {
closed := false
for !closed {
select {
case <-l.closeCh:
closed = true
case responseCh := <-l.acceptCh:
var (
p *win32File
err error
)
for {
p, err = l.makeConnectedServerPipe()
// If the connection was immediately closed by the client, try
// again.
if err != cERROR_NO_DATA {
break
}
}
responseCh <- acceptResponse{p, err}
closed = err == ErrPipeListenerClosed
}
}
syscall.Close(l.firstHandle)
l.firstHandle = 0
// Notify Close() and Accept() callers that the handle has been closed.
close(l.doneCh)
}
// PipeConfig contains configuration for the pipe listener.
type PipeConfig struct {
// SecurityDescriptor contains a Windows security descriptor in SDDL format.
SecurityDescriptor string
// MessageMode determines whether the pipe is in byte or message mode. In either
// case the pipe is read in byte mode by default. The only practical difference in
// this implementation is that CloseWrite() is only supported for message mode pipes;
// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
// transferred to the reader (and returned as io.EOF in this implementation)
// when the pipe is in message mode.
MessageMode bool
// InputBufferSize specifies the size of the input buffer, in bytes.
InputBufferSize int32
// OutputBufferSize specifies the size of the output buffer, in bytes.
OutputBufferSize int32
}
// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
// The pipe must not already exist.
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
var (
sd []byte
err error
)
if c == nil {
c = &PipeConfig{}
}
if c.SecurityDescriptor != "" {
sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
if err != nil {
return nil, err
}
}
h, err := makeServerPipeHandle(path, sd, c, true)
if err != nil {
return nil, err
}
// Create a client handle and connect it. This results in the pipe
// instance always existing, so that clients see ERROR_PIPE_BUSY
// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
// up so that no other instances can be used. This would have been
// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
// instead of CreateNamedPipe. (Apparently created named pipes are
// considered to be in listening state regardless of whether any
// active calls to ConnectNamedPipe are outstanding.)
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != nil {
syscall.Close(h)
return nil, err
}
// Close the client handle. The server side of the instance will
// still be busy, leading to ERROR_PIPE_BUSY instead of
// ERROR_FILE_NOT_FOUND, as long as we don't close the server handle,
// or disconnect the client with DisconnectNamedPipe.
syscall.Close(h2)
l := &win32PipeListener{
firstHandle: h,
path: path,
securityDescriptor: sd,
config: *c,
acceptCh: make(chan (chan acceptResponse)),
closeCh: make(chan int),
doneCh: make(chan int),
}
go l.listenerRoutine()
return l, nil
}
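// exampleListenPipe is an illustrative sketch, not part of the original file: a
// minimal accept loop over a message-mode pipe, where CloseWrite (implemented
// above as a zero-byte write) lets a handler signal EOF to its client. The pipe
// name and handler are made up for illustration.
func exampleListenPipe(handle func(net.Conn)) error {
	l, err := ListenPipe(`\\.\pipe\examplepipe`, &PipeConfig{MessageMode: true})
	if err != nil {
		return err
	}
	defer l.Close()
	for {
		conn, err := l.Accept()
		if err != nil {
			return err // ErrPipeListenerClosed once the listener is closed
		}
		go handle(conn) // each connection behaves as an ordinary net.Conn
	}
}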
func connectPipe(p *win32File) error {
c, err := p.prepareIo()
if err != nil {
return err
}
defer p.wg.Done()
err = connectNamedPipe(p.handle, &c.o)
_, err = p.asyncIo(c, nil, 0, err)
if err != nil && err != cERROR_PIPE_CONNECTED {
return err
}
return nil
}
func (l *win32PipeListener) Accept() (net.Conn, error) {
ch := make(chan acceptResponse)
select {
case l.acceptCh <- ch:
response := <-ch
err := response.err
if err != nil {
return nil, err
}
if l.config.MessageMode {
return &win32MessageBytePipe{
win32Pipe: win32Pipe{win32File: response.f, path: l.path},
}, nil
}
return &win32Pipe{win32File: response.f, path: l.path}, nil
case <-l.doneCh:
return nil, ErrPipeListenerClosed
}
}
func (l *win32PipeListener) Close() error {
select {
case l.closeCh <- 1:
<-l.doneCh
case <-l.doneCh:
}
return nil
}
func (l *win32PipeListener) Addr() net.Addr {
return pipeAddress(l.path)
}

516
vendor/github.com/Microsoft/go-winio/pipe_test.go generated vendored Normal file
View File

@@ -0,0 +1,516 @@
package winio
import (
"bufio"
"bytes"
"io"
"net"
"os"
"sync"
"syscall"
"testing"
"time"
"unsafe"
)
var testPipeName = `\\.\pipe\winiotestpipe`
var aLongTimeAgo = time.Unix(1, 0)
func TestDialUnknownFailsImmediately(t *testing.T) {
_, err := DialPipe(testPipeName, nil)
if err.(*os.PathError).Err != syscall.ENOENT {
t.Fatalf("expected ENOENT got %v", err)
}
}
func TestDialListenerTimesOut(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
var d = time.Duration(10 * time.Millisecond)
_, err = DialPipe(testPipeName, &d)
if err != ErrTimeout {
t.Fatalf("expected ErrTimeout, got %v", err)
}
}
func TestDialAccessDeniedWithRestrictedSD(t *testing.T) {
c := PipeConfig{
SecurityDescriptor: "D:P(A;;0x1200FF;;;WD)",
}
l, err := ListenPipe(testPipeName, &c)
if err != nil {
t.Fatal(err)
}
defer l.Close()
_, err = DialPipe(testPipeName, nil)
if err.(*os.PathError).Err != syscall.ERROR_ACCESS_DENIED {
t.Fatalf("expected ERROR_ACCESS_DENIED, got %v", err)
}
}
func getConnection(cfg *PipeConfig) (client net.Conn, server net.Conn, err error) {
l, err := ListenPipe(testPipeName, cfg)
if err != nil {
return
}
defer l.Close()
type response struct {
c net.Conn
err error
}
ch := make(chan response)
go func() {
c, err := l.Accept()
ch <- response{c, err}
}()
c, err := DialPipe(testPipeName, nil)
if err != nil {
return
}
r := <-ch
if err = r.err; err != nil {
c.Close()
return
}
client = c
server = r.c
return
}
func TestReadTimeout(t *testing.T) {
c, s, err := getConnection(nil)
if err != nil {
t.Fatal(err)
}
defer c.Close()
defer s.Close()
c.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
buf := make([]byte, 10)
_, err = c.Read(buf)
if err != ErrTimeout {
t.Fatalf("expected ErrTimeout, got %v", err)
}
}
func server(l net.Listener, ch chan int) {
c, err := l.Accept()
if err != nil {
panic(err)
}
rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c))
s, err := rw.ReadString('\n')
if err != nil {
panic(err)
}
_, err = rw.WriteString("got " + s)
if err != nil {
panic(err)
}
err = rw.Flush()
if err != nil {
panic(err)
}
c.Close()
ch <- 1
}
func TestFullListenDialReadWrite(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
ch := make(chan int)
go server(l, ch)
c, err := DialPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer c.Close()
rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c))
_, err = rw.WriteString("hello world\n")
if err != nil {
t.Fatal(err)
}
err = rw.Flush()
if err != nil {
t.Fatal(err)
}
s, err := rw.ReadString('\n')
if err != nil {
t.Fatal(err)
}
ms := "got hello world\n"
if s != ms {
t.Errorf("expected '%s', got '%s'", ms, s)
}
<-ch
}
func TestCloseAbortsListen(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
ch := make(chan error)
go func() {
_, err := l.Accept()
ch <- err
}()
time.Sleep(30 * time.Millisecond)
l.Close()
err = <-ch
if err != ErrPipeListenerClosed {
t.Fatalf("expected ErrPipeListenerClosed, got %v", err)
}
}
func ensureEOFOnClose(t *testing.T, r io.Reader, w io.Closer) {
b := make([]byte, 10)
w.Close()
n, err := r.Read(b)
if n > 0 {
t.Errorf("unexpected byte count %d", n)
}
if err != io.EOF {
t.Errorf("expected EOF: %v", err)
}
}
func TestCloseClientEOFServer(t *testing.T) {
c, s, err := getConnection(nil)
if err != nil {
t.Fatal(err)
}
defer c.Close()
defer s.Close()
ensureEOFOnClose(t, c, s)
}
func TestCloseServerEOFClient(t *testing.T) {
c, s, err := getConnection(nil)
if err != nil {
t.Fatal(err)
}
defer c.Close()
defer s.Close()
ensureEOFOnClose(t, s, c)
}
func TestCloseWriteEOF(t *testing.T) {
cfg := &PipeConfig{
MessageMode: true,
}
c, s, err := getConnection(cfg)
if err != nil {
t.Fatal(err)
}
defer c.Close()
defer s.Close()
type closeWriter interface {
CloseWrite() error
}
err = c.(closeWriter).CloseWrite()
if err != nil {
t.Fatal(err)
}
b := make([]byte, 10)
_, err = s.Read(b)
if err != io.EOF {
t.Fatal(err)
}
}
func TestAcceptAfterCloseFails(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
l.Close()
_, err = l.Accept()
if err != ErrPipeListenerClosed {
t.Fatalf("expected ErrPipeListenerClosed, got %v", err)
}
}
func TestDialTimesOutByDefault(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
_, err = DialPipe(testPipeName, nil)
if err != ErrTimeout {
t.Fatalf("expected ErrTimeout, got %v", err)
}
}
func TestTimeoutPendingRead(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
serverDone := make(chan struct{})
go func() {
s, err := l.Accept()
if err != nil {
t.Fatal(err)
}
time.Sleep(1 * time.Second)
s.Close()
close(serverDone)
}()
client, err := DialPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer client.Close()
clientErr := make(chan error)
go func() {
buf := make([]byte, 10)
_, err = client.Read(buf)
clientErr <- err
}()
time.Sleep(100 * time.Millisecond) // make *sure* the pipe is reading before we set the deadline
client.SetReadDeadline(aLongTimeAgo)
select {
case err = <-clientErr:
if err != ErrTimeout {
t.Fatalf("expected ErrTimeout, got %v", err)
}
case <-time.After(100 * time.Millisecond):
t.Fatalf("timed out while waiting for read to cancel")
<-clientErr
}
<-serverDone
}
func TestTimeoutPendingWrite(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
serverDone := make(chan struct{})
go func() {
s, err := l.Accept()
if err != nil {
t.Fatal(err)
}
time.Sleep(1 * time.Second)
s.Close()
close(serverDone)
}()
client, err := DialPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer client.Close()
clientErr := make(chan error)
go func() {
_, err = client.Write([]byte("this should timeout"))
clientErr <- err
}()
time.Sleep(100 * time.Millisecond) // make *sure* the pipe is writing before we set the deadline
client.SetWriteDeadline(aLongTimeAgo)
select {
case err = <-clientErr:
if err != ErrTimeout {
t.Fatalf("expected ErrTimeout, got %v", err)
}
case <-time.After(100 * time.Millisecond):
t.Fatalf("timed out while waiting for write to cancel")
<-clientErr
}
<-serverDone
}
type CloseWriter interface {
CloseWrite() error
}
func TestEchoWithMessaging(t *testing.T) {
c := PipeConfig{
MessageMode: true, // Use message mode so that CloseWrite() is supported
InputBufferSize: 65536, // Use 64KB buffers to improve performance
OutputBufferSize: 65536,
}
l, err := ListenPipe(testPipeName, &c)
if err != nil {
t.Fatal(err)
}
defer l.Close()
listenerDone := make(chan bool)
clientDone := make(chan bool)
go func() {
// server echo
conn, e := l.Accept()
if e != nil {
t.Fatal(e)
}
defer conn.Close()
time.Sleep(500 * time.Millisecond) // make *sure* we don't begin to read before eof signal is sent
io.Copy(conn, conn)
conn.(CloseWriter).CloseWrite()
close(listenerDone)
}()
timeout := 1 * time.Second
client, err := DialPipe(testPipeName, &timeout)
if err != nil {
t.Fatal(err)
}
defer client.Close()
go func() {
// client read back
bytes := make([]byte, 2)
n, e := client.Read(bytes)
if e != nil {
t.Fatal(e)
}
if n != 2 {
t.Fatalf("expected 2 bytes, got %v", n)
}
close(clientDone)
}()
payload := make([]byte, 2)
payload[0] = 0
payload[1] = 1
n, err := client.Write(payload)
if err != nil {
t.Fatal(err)
}
if n != 2 {
t.Fatalf("expected 2 bytes, got %v", n)
}
client.(CloseWriter).CloseWrite()
<-listenerDone
<-clientDone
}
func TestConnectRace(t *testing.T) {
l, err := ListenPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer l.Close()
go func() {
for {
s, err := l.Accept()
if err == ErrPipeListenerClosed {
return
}
if err != nil {
t.Fatal(err)
}
s.Close()
}
}()
for i := 0; i < 1000; i++ {
c, err := DialPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
c.Close()
}
}
func TestMessageReadMode(t *testing.T) {
var wg sync.WaitGroup
defer wg.Wait()
l, err := ListenPipe(testPipeName, &PipeConfig{MessageMode: true})
if err != nil {
t.Fatal(err)
}
defer l.Close()
msg := ([]byte)("hello world")
wg.Add(1)
go func() {
defer wg.Done()
s, err := l.Accept()
if err != nil {
t.Fatal(err)
}
_, err = s.Write(msg)
if err != nil {
t.Fatal(err)
}
s.Close()
}()
c, err := DialPipe(testPipeName, nil)
if err != nil {
t.Fatal(err)
}
defer c.Close()
setNamedPipeHandleState := syscall.NewLazyDLL("kernel32.dll").NewProc("SetNamedPipeHandleState")
p := c.(*win32MessageBytePipe)
mode := uint32(cPIPE_READMODE_MESSAGE)
if s, _, err := setNamedPipeHandleState.Call(uintptr(p.handle), uintptr(unsafe.Pointer(&mode)), 0, 0); s == 0 {
t.Fatal(err)
}
ch := make([]byte, 1)
var vmsg []byte
for {
n, err := c.Read(ch)
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
if n != 1 {
t.Fatal("expected 1: ", n)
}
vmsg = append(vmsg, ch[0])
}
if !bytes.Equal(msg, vmsg) {
t.Fatalf("expected %s: %s", msg, vmsg)
}
}

View File

@@ -0,0 +1,192 @@
package etwlogrus
import (
"fmt"
"reflect"
"github.com/Microsoft/go-winio/internal/etw"
"github.com/sirupsen/logrus"
)
// Hook is a Logrus hook which logs received events to ETW.
type Hook struct {
provider *etw.Provider
}
// NewHook registers a new ETW provider and returns a hook to log from it.
func NewHook(providerName string) (*Hook, error) {
hook := Hook{}
provider, err := etw.NewProvider(providerName, nil)
if err != nil {
return nil, err
}
hook.provider = provider
return &hook, nil
}
// Levels returns the set of levels that this hook wants to receive log entries
// for.
func (h *Hook) Levels() []logrus.Level {
return []logrus.Level{
logrus.TraceLevel,
logrus.DebugLevel,
logrus.InfoLevel,
logrus.WarnLevel,
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}
}
// Fire receives each Logrus entry as it is logged, and logs it to ETW.
func (h *Hook) Fire(e *logrus.Entry) error {
level := etw.Level(e.Level)
if !h.provider.IsEnabledForLevel(level) {
return nil
}
// Reserve extra space for the message field.
fields := make([]etw.FieldOpt, 0, len(e.Data)+1)
fields = append(fields, etw.StringField("Message", e.Message))
for k, v := range e.Data {
fields = append(fields, getFieldOpt(k, v))
}
// We could try to map Logrus levels to ETW levels, but we would lose some
// fidelity as there are fewer ETW levels. So instead we use the level
// directly.
return h.provider.WriteEvent(
"LogrusEntry",
etw.WithEventOpts(etw.WithLevel(level)),
fields)
}
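// exampleInstallHook is an illustrative sketch, not part of the original file:
// it wires the hook into the global logrus logger from application code. The
// provider name is made up for illustration; the caller is responsible for
// closing the hook (and thus the ETW provider) on shutdown.
func exampleInstallHook() (*Hook, error) {
	hook, err := NewHook("ExampleApp-Provider")
	if err != nil {
		return nil, err
	}
	logrus.AddHook(hook) // entries at the levels listed above now also flow to ETW
	logrus.WithField("requestID", 42).Info("request handled")
	return hook, nil
}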
// Currently, we support logging basic builtin types (int, string, etc), slices
// of basic builtin types, error, types derived from the basic types (e.g. "type
// foo int"), and structs (recursively logging their fields). We do not support
// slices of derived types (e.g. "[]foo").
//
// For types that we don't support, the value is formatted via fmt.Sprint, and
// we also log a message that the type is unsupported along with the formatted
// type. The intent of this is to make it easier to see which types are not
// supported in traces, so we can evaluate adding support for more types in the
// future.
func getFieldOpt(k string, v interface{}) etw.FieldOpt {
switch v := v.(type) {
case bool:
return etw.BoolField(k, v)
case []bool:
return etw.BoolArray(k, v)
case string:
return etw.StringField(k, v)
case []string:
return etw.StringArray(k, v)
case int:
return etw.IntField(k, v)
case []int:
return etw.IntArray(k, v)
case int8:
return etw.Int8Field(k, v)
case []int8:
return etw.Int8Array(k, v)
case int16:
return etw.Int16Field(k, v)
case []int16:
return etw.Int16Array(k, v)
case int32:
return etw.Int32Field(k, v)
case []int32:
return etw.Int32Array(k, v)
case int64:
return etw.Int64Field(k, v)
case []int64:
return etw.Int64Array(k, v)
case uint:
return etw.UintField(k, v)
case []uint:
return etw.UintArray(k, v)
case uint8:
return etw.Uint8Field(k, v)
case []uint8:
return etw.Uint8Array(k, v)
case uint16:
return etw.Uint16Field(k, v)
case []uint16:
return etw.Uint16Array(k, v)
case uint32:
return etw.Uint32Field(k, v)
case []uint32:
return etw.Uint32Array(k, v)
case uint64:
return etw.Uint64Field(k, v)
case []uint64:
return etw.Uint64Array(k, v)
case uintptr:
return etw.UintptrField(k, v)
case []uintptr:
return etw.UintptrArray(k, v)
case float32:
return etw.Float32Field(k, v)
case []float32:
return etw.Float32Array(k, v)
case float64:
return etw.Float64Field(k, v)
case []float64:
return etw.Float64Array(k, v)
case error:
return etw.StringField(k, v.Error())
default:
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Bool:
return getFieldOpt(k, rv.Bool())
case reflect.Int:
return getFieldOpt(k, int(rv.Int()))
case reflect.Int8:
return getFieldOpt(k, int8(rv.Int()))
case reflect.Int16:
return getFieldOpt(k, int16(rv.Int()))
case reflect.Int32:
return getFieldOpt(k, int32(rv.Int()))
case reflect.Int64:
return getFieldOpt(k, int64(rv.Int()))
case reflect.Uint:
return getFieldOpt(k, uint(rv.Uint()))
case reflect.Uint8:
return getFieldOpt(k, uint8(rv.Uint()))
case reflect.Uint16:
return getFieldOpt(k, uint16(rv.Uint()))
case reflect.Uint32:
return getFieldOpt(k, uint32(rv.Uint()))
case reflect.Uint64:
return getFieldOpt(k, uint64(rv.Uint()))
case reflect.Uintptr:
return getFieldOpt(k, uintptr(rv.Uint()))
case reflect.Float32:
return getFieldOpt(k, float32(rv.Float()))
case reflect.Float64:
return getFieldOpt(k, float64(rv.Float()))
case reflect.String:
return getFieldOpt(k, rv.String())
case reflect.Struct:
fields := make([]etw.FieldOpt, 0, rv.NumField())
for i := 0; i < rv.NumField(); i++ {
field := rv.Field(i)
if field.CanInterface() {
fields = append(fields, getFieldOpt(k, field.Interface()))
}
}
return etw.Struct(k, fields...)
}
}
return etw.StringField(k, fmt.Sprintf("(Unsupported: %T) %v", v, v))
}
// Close cleans up the hook and closes the ETW provider.
func (h *Hook) Close() error {
return h.provider.Close()
}

View File

@@ -0,0 +1,126 @@
package etwlogrus
import (
"github.com/Microsoft/go-winio/internal/etw"
"testing"
)
func fireEvent(t *testing.T, p *etw.Provider, name string, value interface{}) {
if err := p.WriteEvent(
name,
nil,
etw.WithFields(getFieldOpt("Field", value))); err != nil {
t.Fatal(err)
}
}
// The purpose of this test is to log lots of different field types, to test the
// logic that converts them to ETW. Because we don't have a way to
// programmatically validate the ETW events, this test has two main purposes: (1)
// validate that nothing panics while logging, and (2) allow manual validation that
// the data is logged correctly (through a tool like WPA).
func TestFieldLogging(t *testing.T) {
// Sample WPRP to collect this provider:
//
// <?xml version="1.0"?>
// <WindowsPerformanceRecorder Version="1">
// <Profiles>
// <EventCollector Id="Collector" Name="MyCollector">
// <BufferSize Value="256"/>
// <Buffers Value="100"/>
// </EventCollector>
// <EventProvider Id="HookTest" Name="5e50de03-107c-5a83-74c6-998c4491e7e9"/>
// <Profile Id="Test.Verbose.File" Name="Test" Description="Test" LoggingMode="File" DetailLevel="Verbose">
// <Collectors>
// <EventCollectorId Value="Collector">
// <EventProviders>
// <EventProviderId Value="HookTest"/>
// </EventProviders>
// </EventCollectorId>
// </Collectors>
// </Profile>
// </Profiles>
// </WindowsPerformanceRecorder>
//
// Start collection:
// wpr -start HookTest.wprp -filemode
//
// Stop collection:
// wpr -stop HookTest.etl
p, err := etw.NewProvider("HookTest", nil)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := p.Close(); err != nil {
t.Fatal(err)
}
}()
fireEvent(t, p, "Bool", true)
fireEvent(t, p, "BoolSlice", []bool{true, false, true})
fireEvent(t, p, "EmptyBoolSlice", []bool{})
fireEvent(t, p, "String", "teststring")
fireEvent(t, p, "StringSlice", []string{"sstr1", "sstr2", "sstr3"})
fireEvent(t, p, "EmptyStringSlice", []string{})
fireEvent(t, p, "Int", int(1))
fireEvent(t, p, "IntSlice", []int{2, 3, 4})
fireEvent(t, p, "EmptyIntSlice", []int{})
fireEvent(t, p, "Int8", int8(5))
fireEvent(t, p, "Int8Slice", []int8{6, 7, 8})
fireEvent(t, p, "EmptyInt8Slice", []int8{})
fireEvent(t, p, "Int16", int16(9))
fireEvent(t, p, "Int16Slice", []int16{10, 11, 12})
fireEvent(t, p, "EmptyInt16Slice", []int16{})
fireEvent(t, p, "Int32", int32(13))
fireEvent(t, p, "Int32Slice", []int32{14, 15, 16})
fireEvent(t, p, "EmptyInt32Slice", []int32{})
fireEvent(t, p, "Int64", int64(17))
fireEvent(t, p, "Int64Slice", []int64{18, 19, 20})
fireEvent(t, p, "EmptyInt64Slice", []int64{})
fireEvent(t, p, "Uint", uint(21))
fireEvent(t, p, "UintSlice", []uint{22, 23, 24})
fireEvent(t, p, "EmptyUintSlice", []uint{})
fireEvent(t, p, "Uint8", uint8(25))
fireEvent(t, p, "Uint8Slice", []uint8{26, 27, 28})
fireEvent(t, p, "EmptyUint8Slice", []uint8{})
fireEvent(t, p, "Uint16", uint16(29))
fireEvent(t, p, "Uint16Slice", []uint16{30, 31, 32})
fireEvent(t, p, "EmptyUint16Slice", []uint16{})
fireEvent(t, p, "Uint32", uint32(33))
fireEvent(t, p, "Uint32Slice", []uint32{34, 35, 36})
fireEvent(t, p, "EmptyUint32Slice", []uint32{})
fireEvent(t, p, "Uint64", uint64(37))
fireEvent(t, p, "Uint64Slice", []uint64{38, 39, 40})
fireEvent(t, p, "EmptyUint64Slice", []uint64{})
fireEvent(t, p, "Uintptr", uintptr(41))
fireEvent(t, p, "UintptrSlice", []uintptr{42, 43, 44})
fireEvent(t, p, "EmptyUintptrSlice", []uintptr{})
fireEvent(t, p, "Float32", float32(45.46))
fireEvent(t, p, "Float32Slice", []float32{47.48, 49.50, 51.52})
fireEvent(t, p, "EmptyFloat32Slice", []float32{})
fireEvent(t, p, "Float64", float64(53.54))
fireEvent(t, p, "Float64Slice", []float64{55.56, 57.58, 59.60})
fireEvent(t, p, "EmptyFloat64Slice", []float64{})
type struct1 struct {
A float32
priv int
B []uint
}
type struct2 struct {
A int
B int
}
type struct3 struct {
struct2
A int
B string
priv string
C struct1
D uint16
}
// Unexported fields, and fields in embedded structs, should not be logged.
fireEvent(t, p, "Struct", struct3{struct2{-1, -2}, 1, "2s", "-3s", struct1{3.4, -4, []uint{5, 6, 7}}, 8})
}

202
vendor/github.com/Microsoft/go-winio/privilege.go generated vendored Normal file
View File

@@ -0,0 +1,202 @@
// +build windows
package winio
import (
"bytes"
"encoding/binary"
"fmt"
"runtime"
"sync"
"syscall"
"unicode/utf16"
"golang.org/x/sys/windows"
)
//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
//sys revertToSelf() (err error) = advapi32.RevertToSelf
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
const (
SE_PRIVILEGE_ENABLED = 2
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
)
const (
securityAnonymous = iota
securityIdentification
securityImpersonation
securityDelegation
)
var (
privNames = make(map[string]uint64)
privNameMutex sync.Mutex
)
// PrivilegeError represents an error enabling privileges.
type PrivilegeError struct {
privileges []uint64
}
func (e *PrivilegeError) Error() string {
s := ""
if len(e.privileges) > 1 {
s = "Could not enable privileges "
} else {
s = "Could not enable privilege "
}
for i, p := range e.privileges {
if i != 0 {
s += ", "
}
s += `"`
s += getPrivilegeName(p)
s += `"`
}
return s
}
// RunWithPrivilege enables a single privilege for a function call.
func RunWithPrivilege(name string, fn func() error) error {
return RunWithPrivileges([]string{name}, fn)
}
// RunWithPrivileges enables privileges for a function call.
func RunWithPrivileges(names []string, fn func() error) error {
privileges, err := mapPrivileges(names)
if err != nil {
return err
}
runtime.LockOSThread()
defer runtime.UnlockOSThread()
token, err := newThreadToken()
if err != nil {
return err
}
defer releaseThreadToken(token)
err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
if err != nil {
return err
}
return fn()
}
func mapPrivileges(names []string) ([]uint64, error) {
var privileges []uint64
privNameMutex.Lock()
defer privNameMutex.Unlock()
for _, name := range names {
p, ok := privNames[name]
if !ok {
err := lookupPrivilegeValue("", name, &p)
if err != nil {
return nil, err
}
privNames[name] = p
}
privileges = append(privileges, p)
}
return privileges, nil
}
// EnableProcessPrivileges enables privileges globally for the process.
func EnableProcessPrivileges(names []string) error {
return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
}
// DisableProcessPrivileges disables privileges globally for the process.
func DisableProcessPrivileges(names []string) error {
return enableDisableProcessPrivilege(names, 0)
}
func enableDisableProcessPrivilege(names []string, action uint32) error {
privileges, err := mapPrivileges(names)
if err != nil {
return err
}
p, _ := windows.GetCurrentProcess()
var token windows.Token
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
if err != nil {
return err
}
defer token.Close()
return adjustPrivileges(token, privileges, action)
}
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
for _, p := range privileges {
binary.Write(&b, binary.LittleEndian, p)
binary.Write(&b, binary.LittleEndian, action)
}
prevState := make([]byte, b.Len())
reqSize := uint32(0)
success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
if !success {
return err
}
if err == ERROR_NOT_ALL_ASSIGNED {
return &PrivilegeError{privileges}
}
return nil
}
func getPrivilegeName(luid uint64) string {
var nameBuffer [256]uint16
bufSize := uint32(len(nameBuffer))
err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
if err != nil {
return fmt.Sprintf("<unknown privilege %d>", luid)
}
var displayNameBuffer [256]uint16
displayBufSize := uint32(len(displayNameBuffer))
var langID uint32
err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
if err != nil {
return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
}
return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
}
func newThreadToken() (windows.Token, error) {
err := impersonateSelf(securityImpersonation)
if err != nil {
return 0, err
}
var token windows.Token
err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
if err != nil {
rerr := revertToSelf()
if rerr != nil {
panic(rerr)
}
return 0, err
}
return token, nil
}
func releaseThreadToken(h windows.Token) {
err := revertToSelf()
if err != nil {
panic(err)
}
h.Close()
}
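A minimal, hedged usage sketch for the privilege helpers above (Windows-only; not part of the vendored file — the program and the choice of SeBackupPrivilege are illustrative assumptions):
package main
import (
	"log"
	winio "github.com/Microsoft/go-winio"
)
func main() {
	// Enable the backup privilege only for the duration of the callback.
	// RunWithPrivilege locks the OS thread, impersonates the current token,
	// adjusts the privilege, runs fn, and then reverts the impersonation.
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		// Work that needs SeBackupPrivilege would go here (illustrative only).
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}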

View File

@@ -0,0 +1,17 @@
package winio
import "testing"
func TestRunWithUnavailablePrivilege(t *testing.T) {
err := RunWithPrivilege("SeCreateTokenPrivilege", func() error { return nil })
if _, ok := err.(*PrivilegeError); err == nil || !ok {
t.Fatal("expected PrivilegeError")
}
}
func TestRunWithPrivileges(t *testing.T) {
err := RunWithPrivilege("SeShutdownPrivilege", func() error { return nil })
if err != nil {
t.Fatal(err)
}
}

128
vendor/github.com/Microsoft/go-winio/reparse.go generated vendored Normal file
View File

@@ -0,0 +1,128 @@
package winio
import (
"bytes"
"encoding/binary"
"fmt"
"strings"
"unicode/utf16"
"unsafe"
)
const (
reparseTagMountPoint = 0xA0000003
reparseTagSymlink = 0xA000000C
)
type reparseDataBuffer struct {
ReparseTag uint32
ReparseDataLength uint16
Reserved uint16
SubstituteNameOffset uint16
SubstituteNameLength uint16
PrintNameOffset uint16
PrintNameLength uint16
}
// ReparsePoint describes a Win32 symlink or mount point.
type ReparsePoint struct {
Target string
IsMountPoint bool
}
// UnsupportedReparsePointError is returned when trying to decode a reparse point
// that is neither a symlink nor a mount point.
type UnsupportedReparsePointError struct {
Tag uint32
}
func (e *UnsupportedReparsePointError) Error() string {
return fmt.Sprintf("unsupported reparse point %x", e.Tag)
}
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
// or a mount point.
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
tag := binary.LittleEndian.Uint32(b[0:4])
return DecodeReparsePointData(tag, b[8:])
}
func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
isMountPoint := false
switch tag {
case reparseTagMountPoint:
isMountPoint = true
case reparseTagSymlink:
default:
return nil, &UnsupportedReparsePointError{tag}
}
nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
if !isMountPoint {
nameOffset += 4
}
nameLength := binary.LittleEndian.Uint16(b[6:8])
name := make([]uint16, nameLength/2)
err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
if err != nil {
return nil, err
}
return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
}
func isDriveLetter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
// mount point.
func EncodeReparsePoint(rp *ReparsePoint) []byte {
// Generate an NT path and determine if this is a relative path.
var ntTarget string
relative := false
if strings.HasPrefix(rp.Target, `\\?\`) {
ntTarget = `\??\` + rp.Target[4:]
} else if strings.HasPrefix(rp.Target, `\\`) {
ntTarget = `\??\UNC\` + rp.Target[2:]
} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
ntTarget = `\??\` + rp.Target
} else {
ntTarget = rp.Target
relative = true
}
// The paths must be NUL-terminated even though they are counted strings.
target16 := utf16.Encode([]rune(rp.Target + "\x00"))
ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
size += len(ntTarget16)*2 + len(target16)*2
tag := uint32(reparseTagMountPoint)
if !rp.IsMountPoint {
tag = reparseTagSymlink
size += 4 // Add room for symlink flags
}
data := reparseDataBuffer{
ReparseTag: tag,
ReparseDataLength: uint16(size),
SubstituteNameOffset: 0,
SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
PrintNameOffset: uint16(len(ntTarget16) * 2),
PrintNameLength: uint16((len(target16) - 1) * 2),
}
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, &data)
if !rp.IsMountPoint {
flags := uint32(0)
if relative {
flags |= 1
}
binary.Write(&b, binary.LittleEndian, flags)
}
binary.Write(&b, binary.LittleEndian, ntTarget16)
binary.Write(&b, binary.LittleEndian, target16)
return b.Bytes()
}
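An illustrative round-trip sketch for the reparse-point helpers above (the target path is an arbitrary example, not taken from the vendored code):
package main
import (
	"fmt"
	winio "github.com/Microsoft/go-winio"
)
func main() {
	// Encode a symlink-style reparse point into a REPARSE_DATA_BUFFER, then decode it back.
	rp := &winio.ReparsePoint{Target: `C:\some\target`, IsMountPoint: false}
	buf := winio.EncodeReparsePoint(rp)
	decoded, err := winio.DecodeReparsePoint(buf)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(decoded.Target, decoded.IsMountPoint) // C:\some\target false
}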

98
vendor/github.com/Microsoft/go-winio/sd.go generated vendored Normal file
View File

@@ -0,0 +1,98 @@
// +build windows
package winio
import (
"syscall"
"unsafe"
)
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
//sys localFree(mem uintptr) = LocalFree
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
const (
cERROR_NONE_MAPPED = syscall.Errno(1332)
)
type AccountLookupError struct {
Name string
Err error
}
func (e *AccountLookupError) Error() string {
if e.Name == "" {
return "lookup account: empty account name specified"
}
var s string
switch e.Err {
case cERROR_NONE_MAPPED:
s = "not found"
default:
s = e.Err.Error()
}
return "lookup account " + e.Name + ": " + s
}
type SddlConversionError struct {
Sddl string
Err error
}
func (e *SddlConversionError) Error() string {
return "convert " + e.Sddl + ": " + e.Err.Error()
}
// LookupSidByName looks up the SID of an account by name
func LookupSidByName(name string) (sid string, err error) {
if name == "" {
return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
}
var sidSize, sidNameUse, refDomainSize uint32
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
return "", &AccountLookupError{name, err}
}
sidBuffer := make([]byte, sidSize)
refDomainBuffer := make([]uint16, refDomainSize)
err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
if err != nil {
return "", &AccountLookupError{name, err}
}
var strBuffer *uint16
err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
if err != nil {
return "", &AccountLookupError{name, err}
}
sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
localFree(uintptr(unsafe.Pointer(strBuffer)))
return sid, nil
}
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
var sdBuffer uintptr
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
if err != nil {
return nil, &SddlConversionError{sddl, err}
}
defer localFree(sdBuffer)
sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
return sd, nil
}
func SecurityDescriptorToSddl(sd []byte) (string, error) {
var sddl *uint16
// The returned string length seems to include an arbitrary number of terminating NULs.
// Don't use it.
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
if err != nil {
return "", err
}
defer localFree(uintptr(unsafe.Pointer(sddl)))
return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
}
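A hedged sketch of the SDDL helpers above; the SDDL string is an arbitrary example (owner and group Builtin Administrators, DACL granting GENERIC_ALL to Everyone), not taken from the vendored code:
package main
import (
	"fmt"
	winio "github.com/Microsoft/go-winio"
)
func main() {
	// Convert an SDDL string to a self-relative security descriptor and back.
	sd, err := winio.SddlToSecurityDescriptor("O:BAG:BAD:(A;;GA;;;WD)")
	if err != nil {
		fmt.Println("to descriptor failed:", err)
		return
	}
	sddl, err := winio.SecurityDescriptorToSddl(sd)
	if err != nil {
		fmt.Println("to SDDL failed:", err)
		return
	}
	fmt.Println(sddl)
}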

26
vendor/github.com/Microsoft/go-winio/sd_test.go generated vendored Normal file
View File

@@ -0,0 +1,26 @@
package winio
import "testing"
func TestLookupInvalidSid(t *testing.T) {
_, err := LookupSidByName(".\\weoifjdsklfj")
aerr, ok := err.(*AccountLookupError)
if !ok || aerr.Err != cERROR_NONE_MAPPED {
t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err)
}
}
func TestLookupValidSid(t *testing.T) {
sid, err := LookupSidByName("Everyone")
if err != nil || sid != "S-1-1-0" {
t.Fatalf("expected S-1-1-0, got %s, %s", sid, err)
}
}
func TestLookupEmptyNameFails(t *testing.T) {
_, err := LookupSidByName("")
aerr, ok := err.(*AccountLookupError)
if !ok || aerr.Err != cERROR_NONE_MAPPED {
t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err)
}
}

3
vendor/github.com/Microsoft/go-winio/syscall.go generated vendored Normal file
View File

@@ -0,0 +1,3 @@
package winio
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go

View File

@@ -0,0 +1,25 @@
package main
import (
"flag"
"fmt"
"os"
"github.com/Microsoft/go-winio/internal/etw"
)
func main() {
var pn = flag.String("provider-name", "", "The human readable ETW provider name to be converted into GUID format")
flag.Parse()
if pn == nil || *pn == "" {
fmt.Fprint(os.Stderr, "--provider-name is required")
os.Exit(1)
}
p, err := etw.NewProvider(*pn, nil)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to convert provider-name: '%s' with err: '%s", *pn, err)
os.Exit(1)
}
defer p.Close()
fmt.Fprintf(os.Stdout, "%s", p)
}

View File

@@ -0,0 +1,901 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hard-coding unicode mode for VHD library.
// +build ignore
/*
mksyscall_windows generates windows system call bodies
It parses all files specified on command line containing function
prototypes (like syscall_windows.go) and prints system call bodies
to standard output.
The prototypes are marked by lines beginning with "//sys" and read
like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument. This
includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the Go func name needs to be different from its winapi dll name,
the winapi name can be specified at the end, after the "=" sign, like
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
* Each function that returns err needs to supply a condition against which
the winapi return value will be tested to detect failure. On failure, err
is set to the Windows "last-error"; otherwise it is nil. The condition can
be provided at the end of the //sys declaration, like
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
and is [failretval==0] by default.
Usage:
mksyscall_windows [flags] [path ...]
The flags are:
-output
Specify output file name (outputs to console if blank).
-trace
Generate print statement after every syscall.
*/
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"go/format"
"go/parser"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"text/template"
)
var (
filename = flag.String("output", "", "output file name (standard output if omitted)")
printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
)
func trim(s string) string {
return strings.Trim(s, " \t")
}
var packageName string
func packagename() string {
return packageName
}
func syscalldot() string {
if packageName == "syscall" {
return ""
}
return "syscall."
}
// Param is function parameter
type Param struct {
Name string
Type string
fn *Fn
tmpVarIdx int
}
// tmpVar returns temp variable name that will be used to represent p during syscall.
func (p *Param) tmpVar() string {
if p.tmpVarIdx < 0 {
p.tmpVarIdx = p.fn.curTmpVarIdx
p.fn.curTmpVarIdx++
}
return fmt.Sprintf("_p%d", p.tmpVarIdx)
}
// BoolTmpVarCode returns source code for bool temp variable.
func (p *Param) BoolTmpVarCode() string {
const code = `var %s uint32
if %s {
%s = 1
} else {
%s = 0
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
}
// SliceTmpVarCode returns source code for slice temp variable.
func (p *Param) SliceTmpVarCode() string {
const code = `var %s *%s
if len(%s) > 0 {
%s = &%s[0]
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
}
// StringTmpVarCode returns source code for string temp variable.
func (p *Param) StringTmpVarCode() string {
errvar := p.fn.Rets.ErrorVarName()
if errvar == "" {
errvar = "_"
}
tmp := p.tmpVar()
const code = `var %s %s
%s, %s = %s(%s)`
s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
if errvar == "-" {
return s
}
const morecode = `
if %s != nil {
return
}`
return s + fmt.Sprintf(morecode, errvar)
}
// TmpVarCode returns source code for temp variable.
func (p *Param) TmpVarCode() string {
switch {
case p.Type == "bool":
return p.BoolTmpVarCode()
case strings.HasPrefix(p.Type, "[]"):
return p.SliceTmpVarCode()
default:
return ""
}
}
// TmpVarHelperCode returns source code for helper's temp variable.
func (p *Param) TmpVarHelperCode() string {
if p.Type != "string" {
return ""
}
return p.StringTmpVarCode()
}
// SyscallArgList returns source code fragments representing p parameter
// in syscall. Slices are translated into 2 syscall parameters: pointer to
// the first element and length.
func (p *Param) SyscallArgList() []string {
t := p.HelperType()
var s string
switch {
case t[0] == '*':
s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
case t == "bool":
s = p.tmpVar()
case strings.HasPrefix(t, "[]"):
return []string{
fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
fmt.Sprintf("uintptr(len(%s))", p.Name),
}
default:
s = p.Name
}
return []string{fmt.Sprintf("uintptr(%s)", s)}
}
// IsError determines if p parameter is used to return error.
func (p *Param) IsError() bool {
return p.Name == "err" && p.Type == "error"
}
// HelperType returns type of parameter p used in helper function.
func (p *Param) HelperType() string {
if p.Type == "string" {
return p.fn.StrconvType()
}
return p.Type
}
// join concatenates parameters ps into a string with sep separator.
// Each parameter is converted into string by applying fn to it
// before conversion.
func join(ps []*Param, fn func(*Param) string, sep string) string {
if len(ps) == 0 {
return ""
}
a := make([]string, 0)
for _, p := range ps {
a = append(a, fn(p))
}
return strings.Join(a, sep)
}
// Rets describes function return parameters.
type Rets struct {
Name string
Type string
ReturnsError bool
FailCond string
}
// ErrorVarName returns error variable name for r.
func (r *Rets) ErrorVarName() string {
if r.ReturnsError {
return "err"
}
if r.Type == "error" {
return r.Name
}
return ""
}
// ToParams converts r into slice of *Param.
func (r *Rets) ToParams() []*Param {
ps := make([]*Param, 0)
if len(r.Name) > 0 {
ps = append(ps, &Param{Name: r.Name, Type: r.Type})
}
if r.ReturnsError {
ps = append(ps, &Param{Name: "err", Type: "error"})
}
return ps
}
// List returns source code of syscall return parameters.
func (r *Rets) List() string {
s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
if len(s) > 0 {
s = "(" + s + ")"
}
return s
}
// PrintList returns the source code of the trace-printing part corresponding
// to syscall return values.
func (r *Rets) PrintList() string {
return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// SetReturnValuesCode returns source code that accepts syscall return values.
func (r *Rets) SetReturnValuesCode() string {
if r.Name == "" && !r.ReturnsError {
return ""
}
retvar := "r0"
if r.Name == "" {
retvar = "r1"
}
errvar := "_"
if r.ReturnsError {
errvar = "e1"
}
return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
}
func (r *Rets) useLongHandleErrorCode(retvar string) string {
const code = `if %s {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = %sEINVAL
}
}`
cond := retvar + " == 0"
if r.FailCond != "" {
cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
}
return fmt.Sprintf(code, cond, syscalldot())
}
// SetErrorCode returns source code that sets return parameters.
func (r *Rets) SetErrorCode() string {
const code = `if r0 != 0 {
%s = %sErrno(r0)
}`
if r.Name == "" && !r.ReturnsError {
return ""
}
if r.Name == "" {
return r.useLongHandleErrorCode("r1")
}
if r.Type == "error" {
return fmt.Sprintf(code, r.Name, syscalldot())
}
s := ""
switch {
case r.Type[0] == '*':
s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
case r.Type == "bool":
s = fmt.Sprintf("%s = r0 != 0", r.Name)
default:
s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
}
if !r.ReturnsError {
return s
}
return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
}
// Fn describes syscall function.
type Fn struct {
Name string
Params []*Param
Rets *Rets
PrintTrace bool
dllname string
dllfuncname string
src string
// TODO: get rid of this field and just use parameter index instead
curTmpVarIdx int // ensure tmp variables have unique names
}
// extractParams parses s to extract function parameters.
func extractParams(s string, f *Fn) ([]*Param, error) {
s = trim(s)
if s == "" {
return nil, nil
}
a := strings.Split(s, ",")
ps := make([]*Param, len(a))
for i := range ps {
s2 := trim(a[i])
b := strings.Split(s2, " ")
if len(b) != 2 {
b = strings.Split(s2, "\t")
if len(b) != 2 {
return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
}
}
ps[i] = &Param{
Name: trim(b[0]),
Type: trim(b[1]),
fn: f,
tmpVarIdx: -1,
}
}
return ps, nil
}
// extractSection extracts text out of string s starting after start
// and ending just before end. The found return value indicates success,
// and prefix, body and suffix contain the corresponding parts of string s.
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
s = trim(s)
if strings.HasPrefix(s, string(start)) {
// no prefix
body = s[1:]
} else {
a := strings.SplitN(s, string(start), 2)
if len(a) != 2 {
return "", "", s, false
}
prefix = a[0]
body = a[1]
}
a := strings.SplitN(body, string(end), 2)
if len(a) != 2 {
return "", "", "", false
}
return prefix, a[0], a[1], true
}
// newFn parses string s and returns the created function Fn.
func newFn(s string) (*Fn, error) {
s = trim(s)
f := &Fn{
Rets: &Rets{},
src: s,
PrintTrace: *printTraceFlag,
}
// function name and args
prefix, body, s, found := extractSection(s, '(', ')')
if !found || prefix == "" {
return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
}
f.Name = prefix
var err error
f.Params, err = extractParams(body, f)
if err != nil {
return nil, err
}
// return values
_, body, s, found = extractSection(s, '(', ')')
if found {
r, err := extractParams(body, f)
if err != nil {
return nil, err
}
switch len(r) {
case 0:
case 1:
if r[0].IsError() {
f.Rets.ReturnsError = true
} else {
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
}
case 2:
if !r[1].IsError() {
return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
}
f.Rets.ReturnsError = true
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
default:
return nil, errors.New("Too many return values in \"" + f.src + "\"")
}
}
// fail condition
_, body, s, found = extractSection(s, '[', ']')
if found {
f.Rets.FailCond = body
}
// dll and dll function names
s = trim(s)
if s == "" {
return f, nil
}
if !strings.HasPrefix(s, "=") {
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
s = trim(s[1:])
a := strings.Split(s, ".")
switch len(a) {
case 1:
f.dllfuncname = a[0]
case 2:
f.dllname = a[0]
f.dllfuncname = a[1]
default:
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
return f, nil
}
// DLLName returns DLL name for function f.
func (f *Fn) DLLName() string {
if f.dllname == "" {
return "kernel32"
}
return f.dllname
}
// DLLFuncName returns the DLL function name for function f.
func (f *Fn) DLLFuncName() string {
if f.dllfuncname == "" {
return f.Name
}
return f.dllfuncname
}
// ParamList returns source code for function f parameters.
func (f *Fn) ParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
}
// HelperParamList returns source code for helper function f parameters.
func (f *Fn) HelperParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
}
// ParamPrintList returns the source code of the trace-printing part corresponding
// to syscall input parameters.
func (f *Fn) ParamPrintList() string {
return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// ParamCount returns the number of syscall parameters for function f.
func (f *Fn) ParamCount() int {
n := 0
for _, p := range f.Params {
n += len(p.SyscallArgList())
}
return n
}
// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
// to use. It returns the parameter count for the corresponding SyscallX function.
func (f *Fn) SyscallParamCount() int {
n := f.ParamCount()
switch {
case n <= 3:
return 3
case n <= 6:
return 6
case n <= 9:
return 9
case n <= 12:
return 12
case n <= 15:
return 15
default:
panic("too many arguments to system call")
}
}
// Syscall determines which SyscallX function to use for function f.
func (f *Fn) Syscall() string {
c := f.SyscallParamCount()
if c == 3 {
return syscalldot() + "Syscall"
}
return syscalldot() + "Syscall" + strconv.Itoa(c)
}
// SyscallParamList returns source code for SyscallX parameters for function f.
func (f *Fn) SyscallParamList() string {
a := make([]string, 0)
for _, p := range f.Params {
a = append(a, p.SyscallArgList()...)
}
for len(a) < f.SyscallParamCount() {
a = append(a, "0")
}
return strings.Join(a, ", ")
}
// HelperCallParamList returns source code of call into function f helper.
func (f *Fn) HelperCallParamList() string {
a := make([]string, 0, len(f.Params))
for _, p := range f.Params {
s := p.Name
if p.Type == "string" {
s = p.tmpVar()
}
a = append(a, s)
}
return strings.Join(a, ", ")
}
// IsUTF16 is true if f is a W (utf16) function. It is false
// for all A (ascii) functions.
func (f *Fn) IsUTF16() bool {
return true
}
// StrconvFunc returns name of Go string to OS string function for f.
func (f *Fn) StrconvFunc() string {
if f.IsUTF16() {
return syscalldot() + "UTF16PtrFromString"
}
return syscalldot() + "BytePtrFromString"
}
// StrconvType returns Go type name used for OS string for f.
func (f *Fn) StrconvType() string {
if f.IsUTF16() {
return "*uint16"
}
return "*byte"
}
// HasStringParam is true if f has at least one string parameter.
// Otherwise it is false.
func (f *Fn) HasStringParam() bool {
for _, p := range f.Params {
if p.Type == "string" {
return true
}
}
return false
}
// HelperName returns name of function f helper.
func (f *Fn) HelperName() string {
if !f.HasStringParam() {
return f.Name
}
return "_" + f.Name
}
// Source files and functions.
type Source struct {
Funcs []*Fn
Files []string
StdLibImports []string
ExternalImports []string
}
func (src *Source) Import(pkg string) {
src.StdLibImports = append(src.StdLibImports, pkg)
sort.Strings(src.StdLibImports)
}
func (src *Source) ExternalImport(pkg string) {
src.ExternalImports = append(src.ExternalImports, pkg)
sort.Strings(src.ExternalImports)
}
// ParseFiles parses files listed in fs and extracts all syscall
// functions listed in sys comments. It returns source files
// and functions collection *Source if successful.
func ParseFiles(fs []string) (*Source, error) {
src := &Source{
Funcs: make([]*Fn, 0),
Files: make([]string, 0),
StdLibImports: []string{
"unsafe",
},
ExternalImports: make([]string, 0),
}
for _, file := range fs {
if err := src.ParseFile(file); err != nil {
return nil, err
}
}
return src, nil
}
// DLLs returns the dll names for a source set src.
func (src *Source) DLLs() []string {
uniq := make(map[string]bool)
r := make([]string, 0)
for _, f := range src.Funcs {
name := f.DLLName()
if _, found := uniq[name]; !found {
uniq[name] = true
r = append(r, name)
}
}
return r
}
// ParseFile adds additional file path to a source set src.
func (src *Source) ParseFile(path string) error {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
s := bufio.NewScanner(file)
for s.Scan() {
t := trim(s.Text())
if len(t) < 7 {
continue
}
if !strings.HasPrefix(t, "//sys") {
continue
}
t = t[5:]
if !(t[0] == ' ' || t[0] == '\t') {
continue
}
f, err := newFn(t[1:])
if err != nil {
return err
}
src.Funcs = append(src.Funcs, f)
}
if err := s.Err(); err != nil {
return err
}
src.Files = append(src.Files, path)
// get package name
fset := token.NewFileSet()
_, err = file.Seek(0, 0)
if err != nil {
return err
}
pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
if err != nil {
return err
}
packageName = pkg.Name.Name
return nil
}
// IsStdRepo returns true if src is part of standard library.
func (src *Source) IsStdRepo() (bool, error) {
if len(src.Files) == 0 {
return false, errors.New("no input files provided")
}
abspath, err := filepath.Abs(src.Files[0])
if err != nil {
return false, err
}
goroot := runtime.GOROOT()
if runtime.GOOS == "windows" {
abspath = strings.ToLower(abspath)
goroot = strings.ToLower(goroot)
}
sep := string(os.PathSeparator)
if !strings.HasSuffix(goroot, sep) {
goroot += sep
}
return strings.HasPrefix(abspath, goroot), nil
}
// Generate output source file from a source set src.
func (src *Source) Generate(w io.Writer) error {
const (
pkgStd = iota // any package in std library
pkgXSysWindows // x/sys/windows package
pkgOther
)
isStdRepo, err := src.IsStdRepo()
if err != nil {
return err
}
var pkgtype int
switch {
case isStdRepo:
pkgtype = pkgStd
case packageName == "windows":
// TODO: this needs better logic than just using package name
pkgtype = pkgXSysWindows
default:
pkgtype = pkgOther
}
if *systemDLL {
switch pkgtype {
case pkgStd:
src.Import("internal/syscall/windows/sysdll")
case pkgXSysWindows:
default:
src.ExternalImport("golang.org/x/sys/windows")
}
}
if packageName != "syscall" {
src.Import("syscall")
}
funcMap := template.FuncMap{
"packagename": packagename,
"syscalldot": syscalldot,
"newlazydll": func(dll string) string {
arg := "\"" + dll + ".dll\""
if !*systemDLL {
return syscalldot() + "NewLazyDLL(" + arg + ")"
}
switch pkgtype {
case pkgStd:
return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
case pkgXSysWindows:
return "NewLazySystemDLL(" + arg + ")"
default:
return "windows.NewLazySystemDLL(" + arg + ")"
}
},
}
t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
err = t.Execute(w, src)
if err != nil {
return errors.New("Failed to execute template: " + err.Error())
}
return nil
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(1)
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
src, err := ParseFiles(flag.Args())
if err != nil {
log.Fatal(err)
}
var buf bytes.Buffer
if err := src.Generate(&buf); err != nil {
log.Fatal(err)
}
data, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
if *filename == "" {
_, err = os.Stdout.Write(data)
} else {
err = ioutil.WriteFile(*filename, data, 0644)
}
if err != nil {
log.Fatal(err)
}
}
// TODO: use println instead to print in the following template
const srcTemplate = `
{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package {{packagename}}
import (
{{range .StdLibImports}}"{{.}}"
{{end}}
{{range .ExternalImports}}"{{.}}"
{{end}}
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e {{syscalldot}}Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
{{template "dlls" .}}
{{template "funcnames" .}})
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
{{end}}
{{/* help functions */}}
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
{{end}}{{end}}
{{define "funcnames"}}{{range .Funcs}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}")
{{end}}{{end}}
{{define "helperbody"}}
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
}
{{end}}
{{define "funcbody"}}
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
{{template "tmpvars" .}} {{template "syscall" .}}
{{template "seterror" .}}{{template "printtrace" .}} return
}
{{end}}
{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
{{end}}{{end}}{{end}}
{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
{{end}}{{end}}{{end}}
{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
{{end}}{{end}}
{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
{{end}}{{end}}
`
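To make the //sys grammar described in the doc comment above concrete, a hypothetical generator input file (the messageBox prototype and the file names in the go:generate line are illustrative assumptions, not declarations used by this repository; zvhd.go further down in this diff is real generated output for vhd.go):
package mypkg
//go:generate go run mksyscall_windows.go -output zsyscall_mypkg.go mypkg.go
//sys messageBox(hwnd uintptr, text string, caption string, flags uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW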

108
vendor/github.com/Microsoft/go-winio/vhd/vhd.go generated vendored Normal file
View File

@@ -0,0 +1,108 @@
// +build windows
package vhd
import "syscall"
//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go
//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.OpenVirtualDisk
//sys detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = VirtDisk.DetachVirtualDisk
type virtualStorageType struct {
DeviceID uint32
VendorID [16]byte
}
const virtualDiskAccessNONE uint32 = 0
const virtualDiskAccessATTACHRO uint32 = 65536
const virtualDiskAccessATTACHRW uint32 = 131072
const virtualDiskAccessDETACH uint32 = 262144
const virtualDiskAccessGETINFO uint32 = 524288
const virtualDiskAccessCREATE uint32 = 1048576
const virtualDiskAccessMETAOPS uint32 = 2097152
const virtualDiskAccessREAD uint32 = 851968
const virtualDiskAccessALL uint32 = 4128768
const virtualDiskAccessWRITABLE uint32 = 3276800
const createVirtualDiskFlagNone uint32 = 0
const createVirtualDiskFlagFullPhysicalAllocation uint32 = 1
const createVirtualDiskFlagPreventWritesToSourceDisk uint32 = 2
const createVirtualDiskFlagDoNotCopyMetadataFromParent uint32 = 4
type version2 struct {
UniqueID [16]byte // GUID
MaximumSize uint64
BlockSizeInBytes uint32
SectorSizeInBytes uint32
ParentPath *uint16 // string
SourcePath *uint16 // string
OpenFlags uint32
ParentVirtualStorageType virtualStorageType
SourceVirtualStorageType virtualStorageType
ResiliencyGUID [16]byte // GUID
}
type createVirtualDiskParameters struct {
Version uint32 // Must always be set to 2
Version2 version2
}
// CreateVhdx will create a simple vhdx file at the given path using default values.
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
var defaultType virtualStorageType
parameters := createVirtualDiskParameters{
Version: 2,
Version2: version2{
MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024,
BlockSizeInBytes: blockSizeInMb * 1024 * 1024,
},
}
var handle syscall.Handle
if err := createVirtualDisk(
&defaultType,
path,
virtualDiskAccessNONE,
nil,
createVirtualDiskFlagNone,
0,
&parameters,
nil,
&handle); err != nil {
return err
}
if err := syscall.CloseHandle(handle); err != nil {
return err
}
return nil
}
// DetachVhd detaches a VHD attached at the given path.
func DetachVhd(path string) error {
var (
defaultType virtualStorageType
handle syscall.Handle
)
if err := openVirtualDisk(
&defaultType,
path,
virtualDiskAccessDETACH,
0,
nil,
&handle); err != nil {
return err
}
defer syscall.CloseHandle(handle)
if err := detachVirtualDisk(handle, 0, 0); err != nil {
return err
}
return nil
}
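A minimal usage sketch for the exported VHD helpers above (Windows-only; the path and sizes are arbitrary example values, not from the vendored code):
package main
import (
	"log"
	"github.com/Microsoft/go-winio/vhd"
)
func main() {
	// Create a 1 GB VHDX with a 1 MB block size at the given path.
	if err := vhd.CreateVhdx(`C:\temp\example.vhdx`, 1, 1); err != nil {
		log.Fatalf("CreateVhdx failed: %v", err)
	}
	// vhd.DetachVhd(path) would detach a VHD that is currently attached at path.
}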

99
vendor/github.com/Microsoft/go-winio/vhd/zvhd.go generated vendored Normal file
View File

@@ -0,0 +1,99 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package vhd
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll")
procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk")
procOpenVirtualDisk = modVirtDisk.NewProc("OpenVirtualDisk")
procDetachVirtualDisk = modVirtDisk.NewProc("DetachVirtualDisk")
)
func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(path)
if err != nil {
return
}
return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle)
}
func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle)))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(path)
if err != nil {
return
}
return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle)
}
func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) {
r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) {
r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(flags), uintptr(providerSpecificFlags))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}

138
vendor/github.com/Microsoft/go-winio/wim/decompress.go generated vendored Normal file
View File

@@ -0,0 +1,138 @@
package wim
import (
"encoding/binary"
"io"
"io/ioutil"
"github.com/Microsoft/go-winio/wim/lzx"
)
const chunkSize = 32768 // Compressed resource chunk size
type compressedReader struct {
r *io.SectionReader
d io.ReadCloser
chunks []int64
curChunk int
originalSize int64
}
func newCompressedReader(r *io.SectionReader, originalSize int64, offset int64) (*compressedReader, error) {
nchunks := (originalSize + chunkSize - 1) / chunkSize
var base int64
chunks := make([]int64, nchunks)
if originalSize <= 0xffffffff {
// 32-bit chunk offsets
base = (nchunks - 1) * 4
chunks32 := make([]uint32, nchunks-1)
err := binary.Read(r, binary.LittleEndian, chunks32)
if err != nil {
return nil, err
}
for i, n := range chunks32 {
chunks[i+1] = int64(n)
}
} else {
// 64-bit chunk offsets
base = (nchunks - 1) * 8
err := binary.Read(r, binary.LittleEndian, chunks[1:])
if err != nil {
return nil, err
}
}
for i, c := range chunks {
chunks[i] = c + base
}
cr := &compressedReader{
r: r,
chunks: chunks,
originalSize: originalSize,
}
err := cr.reset(int(offset / chunkSize))
if err != nil {
return nil, err
}
suboff := offset % chunkSize
if suboff != 0 {
_, err := io.CopyN(ioutil.Discard, cr.d, suboff)
if err != nil {
return nil, err
}
}
return cr, nil
}
func (r *compressedReader) chunkOffset(n int) int64 {
if n == len(r.chunks) {
return r.r.Size()
}
return r.chunks[n]
}
func (r *compressedReader) chunkSize(n int) int {
return int(r.chunkOffset(n+1) - r.chunkOffset(n))
}
func (r *compressedReader) uncompressedSize(n int) int {
if n < len(r.chunks)-1 {
return chunkSize
}
size := int(r.originalSize % chunkSize)
if size == 0 {
size = chunkSize
}
return size
}
func (r *compressedReader) reset(n int) error {
if n >= len(r.chunks) {
return io.EOF
}
if r.d != nil {
r.d.Close()
}
r.curChunk = n
size := r.chunkSize(n)
uncompressedSize := r.uncompressedSize(n)
section := io.NewSectionReader(r.r, r.chunkOffset(n), int64(size))
if size != uncompressedSize {
d, err := lzx.NewReader(section, uncompressedSize)
if err != nil {
return err
}
r.d = d
} else {
r.d = ioutil.NopCloser(section)
}
return nil
}
func (r *compressedReader) Read(b []byte) (int, error) {
for {
n, err := r.d.Read(b)
if err != io.EOF {
return n, err
}
err = r.reset(r.curChunk + 1)
if err != nil {
return n, err
}
}
}
func (r *compressedReader) Close() error {
var err error
if r.d != nil {
err = r.d.Close()
r.d = nil
}
return err
}

606
vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go generated vendored Normal file
View File

@@ -0,0 +1,606 @@
// Package lzx implements a decompressor for the WIM variant of the
// LZX compression algorithm.
//
// The LZX algorithm is an earlier variant of LZX DELTA, which is documented
// at https://msdn.microsoft.com/en-us/library/cc483133(v=exchg.80).aspx.
package lzx
import (
"bytes"
"encoding/binary"
"errors"
"io"
)
const (
maincodecount = 496
maincodesplit = 256
lencodecount = 249
lenshift = 9
codemask = 0x1ff
tablebits = 9
tablesize = 1 << tablebits
maxBlockSize = 32768
windowSize = 32768
maxTreePathLen = 16
e8filesize = 12000000
maxe8offset = 0x3fffffff
verbatimBlock = 1
alignedOffsetBlock = 2
uncompressedBlock = 3
)
var footerBits = [...]byte{
0, 0, 0, 0, 1, 1, 2, 2,
3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10,
11, 11, 12, 12, 13, 13, 14,
}
var basePosition = [...]uint16{
0, 1, 2, 3, 4, 6, 8, 12,
16, 24, 32, 48, 64, 96, 128, 192,
256, 384, 512, 768, 1024, 1536, 2048, 3072,
4096, 6144, 8192, 12288, 16384, 24576, 32768,
}
var (
errCorrupt = errors.New("LZX data corrupt")
)
// Reader is an interface used by the decompressor to access
// the input stream. If the provided io.Reader does not implement
// Reader, then a bufio.Reader is used.
type Reader interface {
io.Reader
io.ByteReader
}
type decompressor struct {
r io.Reader
err error
unaligned bool
nbits byte
c uint32
lru [3]uint16
uncompressed int
windowReader *bytes.Reader
mainlens [maincodecount]byte
lenlens [lencodecount]byte
window [windowSize]byte
b []byte
bv int
bo int
}
//go:noinline
func (f *decompressor) fail(err error) {
if f.err == nil {
f.err = err
}
f.bo = 0
f.bv = 0
}
func (f *decompressor) ensureAtLeast(n int) error {
if f.bv-f.bo >= n {
return nil
}
if f.err != nil {
return f.err
}
if f.bv != f.bo {
copy(f.b[:f.bv-f.bo], f.b[f.bo:f.bv])
}
n, err := io.ReadAtLeast(f.r, f.b[f.bv-f.bo:], n)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
} else {
f.fail(err)
}
return err
}
f.bv = f.bv - f.bo + n
f.bo = 0
return nil
}
// feed retrieves another 16-bit word from the stream and consumes
// it into f.c. It returns false if there are no more bytes available.
// Otherwise, on error, it sets f.err.
func (f *decompressor) feed() bool {
err := f.ensureAtLeast(2)
if err != nil {
if err == io.ErrUnexpectedEOF {
return false
}
}
f.c |= (uint32(f.b[f.bo+1])<<8 | uint32(f.b[f.bo])) << (16 - f.nbits)
f.nbits += 16
f.bo += 2
return true
}
// getBits retrieves the next n bits from the byte stream. n
// must be <= 16. It sets f.err on error.
func (f *decompressor) getBits(n byte) uint16 {
if f.nbits < n {
if !f.feed() {
f.fail(io.ErrUnexpectedEOF)
}
}
c := uint16(f.c >> (32 - n))
f.c <<= n
f.nbits -= n
return c
}
type huffman struct {
extra [][]uint16
maxbits byte
table [tablesize]uint16
}
// buildTable builds a huffman decoding table from a slice of code lengths,
// one per code, in order. Each code length must be <= maxTreePathLen.
// See https://en.wikipedia.org/wiki/Canonical_Huffman_code.
func buildTable(codelens []byte) *huffman {
// Determine the number of codes of each length, and the
// maximum length.
var count [maxTreePathLen + 1]uint
var max byte
for _, cl := range codelens {
count[cl]++
if max < cl {
max = cl
}
}
if max == 0 {
return &huffman{}
}
// Determine the first code of each length.
var first [maxTreePathLen + 1]uint
code := uint(0)
for i := byte(1); i <= max; i++ {
code <<= 1
first[i] = code
code += count[i]
}
if code != 1<<max {
return nil
}
// Build a table for code lookup. For code sizes < max,
// put all possible suffixes for the code into the table, too.
// For max > tablebits, split long codes into additional tables
// of suffixes of max-tablebits length.
h := &huffman{maxbits: max}
if max > tablebits {
core := first[tablebits+1] / 2 // Number of codes that fit without extra tables
nextra := 1<<tablebits - core // Number of extra entries
h.extra = make([][]uint16, nextra)
for code := core; code < 1<<tablebits; code++ {
h.table[code] = uint16(code - core)
h.extra[code-core] = make([]uint16, 1<<(max-tablebits))
}
}
for i, cl := range codelens {
if cl != 0 {
code := first[cl]
first[cl]++
v := uint16(cl)<<lenshift | uint16(i)
if cl <= tablebits {
extendedCode := code << (tablebits - cl)
for j := uint(0); j < 1<<(tablebits-cl); j++ {
h.table[extendedCode+j] = v
}
} else {
prefix := code >> (cl - tablebits)
suffix := code & (1<<(cl-tablebits) - 1)
extendedCode := suffix << (max - cl)
for j := uint(0); j < 1<<(max-cl); j++ {
h.extra[h.table[prefix]][extendedCode+j] = v
}
}
}
}
return h
}
// getCode retrieves the next code using the provided
// huffman tree. It sets f.err on error.
func (f *decompressor) getCode(h *huffman) uint16 {
if h.maxbits > 0 {
if f.nbits < maxTreePathLen {
f.feed()
}
// For codes with length < tablebits, it doesn't matter
// what the remainder of the bits used for table lookup
// are, since entries with all possible suffixes were
// added to the table.
c := h.table[f.c>>(32-tablebits)]
if c >= 1<<lenshift {
// The code is already in c.
} else {
c = h.extra[c][f.c<<tablebits>>(32-(h.maxbits-tablebits))]
}
n := byte(c >> lenshift)
if f.nbits >= n {
// Only consume the length of the code, not the maximum
// code length.
f.c <<= n
f.nbits -= n
return c & codemask
}
f.fail(io.ErrUnexpectedEOF)
return 0
}
// This is an empty tree. It should not be used.
f.fail(errCorrupt)
return 0
}
// readTree updates the huffman tree path lengths in lens by
// reading and decoding lengths from the byte stream. lens
// should be prepopulated with the previous block's tree's path
// lengths. For the first block, lens should be zero.
func (f *decompressor) readTree(lens []byte) error {
// Get the pre-tree for the main tree.
var pretreeLen [20]byte
for i := range pretreeLen {
pretreeLen[i] = byte(f.getBits(4))
}
if f.err != nil {
return f.err
}
h := buildTable(pretreeLen[:])
// The lengths are encoded as a series of huffman codes
// encoded by the pre-tree.
for i := 0; i < len(lens); {
c := byte(f.getCode(h))
if f.err != nil {
return f.err
}
switch {
case c <= 16: // length is delta from previous length
lens[i] = (lens[i] + 17 - c) % 17
i++
case c == 17: // next n + 4 lengths are zero
zeroes := int(f.getBits(4)) + 4
if i+zeroes > len(lens) {
return errCorrupt
}
for j := 0; j < zeroes; j++ {
lens[i+j] = 0
}
i += zeroes
case c == 18: // next n + 20 lengths are zero
zeroes := int(f.getBits(5)) + 20
if i+zeroes > len(lens) {
return errCorrupt
}
for j := 0; j < zeroes; j++ {
lens[i+j] = 0
}
i += zeroes
case c == 19: // next n + 4 lengths all have the same value
same := int(f.getBits(1)) + 4
if i+same > len(lens) {
return errCorrupt
}
c = byte(f.getCode(h))
if c > 16 {
return errCorrupt
}
l := (lens[i] + 17 - c) % 17
for j := 0; j < same; j++ {
lens[i+j] = l
}
i += same
default:
return errCorrupt
}
}
if f.err != nil {
return f.err
}
return nil
}
func (f *decompressor) readBlockHeader() (byte, uint16, error) {
// If the previous block was an unaligned uncompressed block, restore
// 2-byte alignment.
if f.unaligned {
err := f.ensureAtLeast(1)
if err != nil {
return 0, 0, err
}
f.bo++
f.unaligned = false
}
blockType := f.getBits(3)
full := f.getBits(1)
var blockSize uint16
if full != 0 {
blockSize = maxBlockSize
} else {
blockSize = f.getBits(16)
if blockSize > maxBlockSize {
return 0, 0, errCorrupt
}
}
if f.err != nil {
return 0, 0, f.err
}
switch blockType {
case verbatimBlock, alignedOffsetBlock:
// The caller will read the huffman trees.
case uncompressedBlock:
if f.nbits > 16 {
panic("impossible: more than one 16-bit word remains")
}
// Drop the remaining bits in the current 16-bit word
// If there are no bits left, discard a full 16-bit word.
n := f.nbits
if n == 0 {
n = 16
}
f.getBits(n)
// Read the LRU values for the next block.
err := f.ensureAtLeast(12)
if err != nil {
return 0, 0, err
}
f.lru[0] = uint16(binary.LittleEndian.Uint32(f.b[f.bo : f.bo+4]))
f.lru[1] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+4 : f.bo+8]))
f.lru[2] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+8 : f.bo+12]))
f.bo += 12
default:
return 0, 0, errCorrupt
}
return byte(blockType), blockSize, nil
}
// readTrees reads the two or three huffman trees for the current block.
// readAligned specifies whether to read the aligned offset tree.
func (f *decompressor) readTrees(readAligned bool) (main *huffman, length *huffman, aligned *huffman, err error) {
// Aligned offset blocks start with a small aligned offset tree.
if readAligned {
var alignedLen [8]byte
for i := range alignedLen {
alignedLen[i] = byte(f.getBits(3))
}
aligned = buildTable(alignedLen[:])
if aligned == nil {
err = errors.New("corrupt")
return
}
}
// The main tree is encoded in two parts.
err = f.readTree(f.mainlens[:maincodesplit])
if err != nil {
return
}
err = f.readTree(f.mainlens[maincodesplit:])
if err != nil {
return
}
main = buildTable(f.mainlens[:])
if main == nil {
err = errors.New("corrupt")
return
}
// The length tree is encoded in a single part.
err = f.readTree(f.lenlens[:])
if err != nil {
return
}
length = buildTable(f.lenlens[:])
if length == nil {
err = errors.New("corrupt")
return
}
err = f.err
return
}
// readCompressedBlock decodes a compressed block, writing into the window
// starting at start and ending at end, and using the provided huffman trees.
func (f *decompressor) readCompressedBlock(start, end uint16, hmain, hlength, haligned *huffman) (int, error) {
i := start
for i < end {
main := f.getCode(hmain)
if f.err != nil {
break
}
if main < 256 {
// Literal byte.
f.window[i] = byte(main)
i++
continue
}
// This is a match backward in the window. Determine
// the offset and length.
matchlen := (main - 256) % 8
slot := (main - 256) / 8
// The length is either the low bits of the code,
// or if this is 7, is encoded with the length tree.
if matchlen == 7 {
matchlen += f.getCode(hlength)
}
matchlen += 2
var matchoffset uint16
if slot < 3 {
// The offset is one of the LRU values.
matchoffset = f.lru[slot]
f.lru[slot] = f.lru[0]
f.lru[0] = matchoffset
} else {
// The offset is encoded as a combination of the
// slot and more bits from the bit stream.
offsetbits := footerBits[slot]
var verbatimbits, alignedbits uint16
if offsetbits > 0 {
if haligned != nil && offsetbits >= 3 {
// This is an aligned offset block. Combine
// the bits written verbatim with the aligned
// offset tree code.
verbatimbits = f.getBits(offsetbits-3) * 8
alignedbits = f.getCode(haligned)
} else {
// There are no aligned offset bits to read,
// only verbatim bits.
verbatimbits = f.getBits(offsetbits)
alignedbits = 0
}
}
matchoffset = basePosition[slot] + verbatimbits + alignedbits - 2
// Update the LRU cache.
f.lru[2] = f.lru[1]
f.lru[1] = f.lru[0]
f.lru[0] = matchoffset
}
if matchoffset <= i && matchlen <= end-i {
copyend := i + matchlen
for ; i < copyend; i++ {
f.window[i] = f.window[i-matchoffset]
}
} else {
f.fail(errCorrupt)
break
}
}
return int(i - start), f.err
}
// readBlock decodes the current block and returns the number of uncompressed bytes.
func (f *decompressor) readBlock(start uint16) (int, error) {
blockType, size, err := f.readBlockHeader()
if err != nil {
return 0, err
}
if blockType == uncompressedBlock {
if size%2 == 1 {
// Remember to realign the byte stream at the next block.
f.unaligned = true
}
copied := 0
if f.bo < f.bv {
copied = int(size)
s := int(start)
if copied > f.bv-f.bo {
copied = f.bv - f.bo
}
copy(f.window[s:s+copied], f.b[f.bo:f.bo+copied])
f.bo += copied
}
n, err := io.ReadFull(f.r, f.window[start+uint16(copied):start+size])
return copied + n, err
}
hmain, hlength, haligned, err := f.readTrees(blockType == alignedOffsetBlock)
if err != nil {
return 0, err
}
return f.readCompressedBlock(start, start+size, hmain, hlength, haligned)
}
// decodeE8 reverses the 0xe8 x86 instruction encoding that was applied
// to the uncompressed data before it was compressed.
func decodeE8(b []byte, off int64) {
if off > maxe8offset || len(b) < 10 {
return
}
for i := 0; i < len(b)-10; i++ {
if b[i] == 0xe8 {
currentPtr := int32(off) + int32(i)
abs := int32(binary.LittleEndian.Uint32(b[i+1 : i+5]))
if abs >= -currentPtr && abs < e8filesize {
var rel int32
if abs >= 0 {
rel = abs - currentPtr
} else {
rel = abs + e8filesize
}
binary.LittleEndian.PutUint32(b[i+1:i+5], uint32(rel))
}
i += 4
}
}
}
func (f *decompressor) Read(b []byte) (int, error) {
// Read and uncompress everything.
if f.windowReader == nil {
n := 0
for n < f.uncompressed {
k, err := f.readBlock(uint16(n))
if err != nil {
return 0, err
}
n += k
}
decodeE8(f.window[:f.uncompressed], 0)
f.windowReader = bytes.NewReader(f.window[:f.uncompressed])
}
// Just read directly from the window.
return f.windowReader.Read(b)
}
func (f *decompressor) Close() error {
return nil
}
// NewReader returns a new io.ReadCloser that decompresses a
// WIM LZX stream until uncompressedSize bytes have been returned.
func NewReader(r io.Reader, uncompressedSize int) (io.ReadCloser, error) {
if uncompressedSize > windowSize {
return nil, errors.New("uncompressed size is limited to 32KB")
}
f := &decompressor{
lru: [3]uint16{1, 1, 1},
uncompressed: uncompressedSize,
b: make([]byte, 4096),
r: r,
}
return f, nil
}
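A hedged sketch of how a caller such as decompress.go above uses this package: expand one LZX-compressed WIM chunk whose uncompressed size is known (the helper name and the nil placeholder input are illustrative assumptions):
package main
import (
	"bytes"
	"fmt"
	"io/ioutil"
	"github.com/Microsoft/go-winio/wim/lzx"
)
// decompressChunk expands one compressed WIM chunk. compressed holds the raw
// chunk bytes and uncompressedSize is the expected output size (at most 32768).
func decompressChunk(compressed []byte, uncompressedSize int) ([]byte, error) {
	r, err := lzx.NewReader(bytes.NewReader(compressed), uncompressedSize)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}
func main() {
	// Illustrative only: real chunk bytes would come from a WIM resource stream.
	fmt.Println(decompressChunk(nil, 0))
}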

View File

@@ -0,0 +1,51 @@
package main
import (
"flag"
"fmt"
"os"
"github.com/Microsoft/go-winio/wim"
)
func main() {
flag.Parse()
f, err := os.Open(flag.Arg(0))
if err != nil {
panic(err)
}
w, err := wim.NewReader(f)
if err != nil {
panic(err)
}
fmt.Printf("%#v\n%#v\n", w.Image[0], w.Image[0].Windows)
dir, err := w.Image[0].Open()
if err != nil {
panic(err)
}
err = recur(dir)
if err != nil {
panic(err)
}
}
func recur(d *wim.File) error {
files, err := d.Readdir()
if err != nil {
return fmt.Errorf("%s: %s", d.Name, err)
}
for _, f := range files {
if f.IsDir() {
err = recur(f)
if err != nil {
return fmt.Errorf("%s: %s", f.Name, err)
}
}
}
return nil
}

866
vendor/github.com/Microsoft/go-winio/wim/wim.go generated vendored Normal file
View File

@@ -0,0 +1,866 @@
// Package wim implements a WIM file parser.
//
// WIM files are used to distribute Windows file system and container images.
// They are documented at https://msdn.microsoft.com/en-us/library/windows/desktop/dd861280.aspx.
package wim
import (
"bytes"
"crypto/sha1"
"encoding/binary"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"strconv"
"sync"
"time"
"unicode/utf16"
)
// File attribute constants from Windows.
const (
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
FILE_ATTRIBUTE_SYSTEM = 0x00000004
FILE_ATTRIBUTE_DIRECTORY = 0x00000010
FILE_ATTRIBUTE_ARCHIVE = 0x00000020
FILE_ATTRIBUTE_DEVICE = 0x00000040
FILE_ATTRIBUTE_NORMAL = 0x00000080
FILE_ATTRIBUTE_TEMPORARY = 0x00000100
FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200
FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
FILE_ATTRIBUTE_COMPRESSED = 0x00000800
FILE_ATTRIBUTE_OFFLINE = 0x00001000
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000
FILE_ATTRIBUTE_ENCRYPTED = 0x00004000
FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000
FILE_ATTRIBUTE_VIRTUAL = 0x00010000
FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000
FILE_ATTRIBUTE_EA = 0x00040000
)
// Windows processor architectures.
const (
PROCESSOR_ARCHITECTURE_INTEL = 0
PROCESSOR_ARCHITECTURE_MIPS = 1
PROCESSOR_ARCHITECTURE_ALPHA = 2
PROCESSOR_ARCHITECTURE_PPC = 3
PROCESSOR_ARCHITECTURE_SHX = 4
PROCESSOR_ARCHITECTURE_ARM = 5
PROCESSOR_ARCHITECTURE_IA64 = 6
PROCESSOR_ARCHITECTURE_ALPHA64 = 7
PROCESSOR_ARCHITECTURE_MSIL = 8
PROCESSOR_ARCHITECTURE_AMD64 = 9
PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10
PROCESSOR_ARCHITECTURE_NEUTRAL = 11
PROCESSOR_ARCHITECTURE_ARM64 = 12
)
var wimImageTag = [...]byte{'M', 'S', 'W', 'I', 'M', 0, 0, 0}
type guid struct {
Data1 uint32
Data2 uint16
Data3 uint16
Data4 [8]byte
}
func (g guid) String() string {
return fmt.Sprintf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", g.Data1, g.Data2, g.Data3, g.Data4[0], g.Data4[1], g.Data4[2], g.Data4[3], g.Data4[4], g.Data4[5], g.Data4[6], g.Data4[7])
}
type resourceDescriptor struct {
FlagsAndCompressedSize uint64
Offset int64
OriginalSize int64
}
type resFlag byte
const (
resFlagFree resFlag = 1 << iota
resFlagMetadata
resFlagCompressed
resFlagSpanned
)
const validate = false
const supportedResFlags = resFlagMetadata | resFlagCompressed
func (r *resourceDescriptor) Flags() resFlag {
return resFlag(r.FlagsAndCompressedSize >> 56)
}
func (r *resourceDescriptor) CompressedSize() int64 {
return int64(r.FlagsAndCompressedSize & 0xffffffffffffff)
}
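// Note (added for clarity): FlagsAndCompressedSize packs the resFlag bits into
// the top 8 bits of the uint64 and the compressed size into the low 56 bits,
// which is why Flags shifts right by 56 and CompressedSize masks off the top
// byte with 0xffffffffffffff.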
func (r *resourceDescriptor) String() string {
s := fmt.Sprintf("%d bytes at %d", r.CompressedSize(), r.Offset)
if r.Flags()&4 != 0 {
s += fmt.Sprintf(" (uncompresses to %d)", r.OriginalSize)
}
return s
}
// SHA1Hash contains the SHA1 hash of a file or stream.
type SHA1Hash [20]byte
type streamDescriptor struct {
resourceDescriptor
PartNumber uint16
RefCount uint32
Hash SHA1Hash
}
type hdrFlag uint32
const (
hdrFlagReserved hdrFlag = 1 << iota
hdrFlagCompressed
hdrFlagReadOnly
hdrFlagSpanned
hdrFlagResourceOnly
hdrFlagMetadataOnly
hdrFlagWriteInProgress
hdrFlagRpFix
)
const (
hdrFlagCompressReserved hdrFlag = 1 << (iota + 16)
hdrFlagCompressXpress
hdrFlagCompressLzx
)
const supportedHdrFlags = hdrFlagRpFix | hdrFlagReadOnly | hdrFlagCompressed | hdrFlagCompressLzx
type wimHeader struct {
ImageTag [8]byte
Size uint32
Version uint32
Flags hdrFlag
CompressionSize uint32
WIMGuid guid
PartNumber uint16
TotalParts uint16
ImageCount uint32
OffsetTable resourceDescriptor
XMLData resourceDescriptor
BootMetadata resourceDescriptor
BootIndex uint32
Padding uint32
Integrity resourceDescriptor
Unused [60]byte
}
type securityblockDisk struct {
TotalLength uint32
NumEntries uint32
}
const securityblockDiskSize = 8
type direntry struct {
Attributes uint32
SecurityID uint32
SubdirOffset int64
Unused1, Unused2 int64
CreationTime Filetime
LastAccessTime Filetime
LastWriteTime Filetime
Hash SHA1Hash
Padding uint32
ReparseHardLink int64
StreamCount uint16
ShortNameLength uint16
FileNameLength uint16
}
var direntrySize = int64(binary.Size(direntry{}) + 8) // includes an 8-byte length prefix
type streamentry struct {
Unused int64
Hash SHA1Hash
NameLength int16
}
var streamentrySize = int64(binary.Size(streamentry{}) + 8) // includes an 8-byte length prefix
// Filetime represents a Windows time.
type Filetime struct {
LowDateTime uint32
HighDateTime uint32
}
// Time returns the time as time.Time.
func (ft *Filetime) Time() time.Time {
// 100-nanosecond intervals since January 1, 1601
nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)
// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)
nsec -= 116444736000000000
// convert into nanoseconds
nsec *= 100
return time.Unix(0, nsec)
}
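// Worked example (added for clarity, not in the original source): there are
// 11644473600 seconds (134774 days) between 1601-01-01 and 1970-01-01, so
// 116444736000000000 hundred-nanosecond ticks is exactly the Unix epoch;
// Filetime{LowDateTime: 0xd53e8000, HighDateTime: 0x019db1de}.Time() returns
// 1970-01-01T00:00:00 UTC.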
// UnmarshalXML unmarshals the time from a WIM XML blob.
func (ft *Filetime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type time struct {
Low string `xml:"LOWPART"`
High string `xml:"HIGHPART"`
}
var t time
err := d.DecodeElement(&t, &start)
if err != nil {
return err
}
low, err := strconv.ParseUint(t.Low, 0, 32)
if err != nil {
return err
}
high, err := strconv.ParseUint(t.High, 0, 32)
if err != nil {
return err
}
ft.LowDateTime = uint32(low)
ft.HighDateTime = uint32(high)
return nil
}
type info struct {
Image []ImageInfo `xml:"IMAGE"`
}
// ImageInfo contains information about the image.
type ImageInfo struct {
Name string `xml:"NAME"`
Index int `xml:"INDEX,attr"`
CreationTime Filetime `xml:"CREATIONTIME"`
ModTime Filetime `xml:"LASTMODIFICATIONTIME"`
Windows *WindowsInfo `xml:"WINDOWS"`
}
// WindowsInfo contains information about the Windows installation in the image.
type WindowsInfo struct {
Arch byte `xml:"ARCH"`
ProductName string `xml:"PRODUCTNAME"`
EditionID string `xml:"EDITIONID"`
InstallationType string `xml:"INSTALLATIONTYPE"`
ProductType string `xml:"PRODUCTTYPE"`
Languages []string `xml:"LANGUAGES>LANGUAGE"`
DefaultLanguage string `xml:"LANGUAGES>DEFAULT"`
Version Version `xml:"VERSION"`
SystemRoot string `xml:"SYSTEMROOT"`
}
// Version represents a Windows build version.
type Version struct {
Major int `xml:"MAJOR"`
Minor int `xml:"MINOR"`
Build int `xml:"BUILD"`
SPBuild int `xml:"SPBUILD"`
SPLevel int `xml:"SPLEVEL"`
}
// ParseError is returned when the WIM cannot be parsed.
type ParseError struct {
Oper string
Path string
Err error
}
func (e *ParseError) Error() string {
if e.Path == "" {
return "WIM parse error at " + e.Oper + ": " + e.Err.Error()
}
return fmt.Sprintf("WIM parse error: %s %s: %s", e.Oper, e.Path, e.Err.Error())
}
// Reader provides functions to read a WIM file.
type Reader struct {
hdr wimHeader
r io.ReaderAt
fileData map[SHA1Hash]resourceDescriptor
XMLInfo string // The XML information about the WIM.
Image []*Image // The WIM's images.
}
// Image represents an image within a WIM file.
type Image struct {
wim *Reader
offset resourceDescriptor
sds [][]byte
rootOffset int64
r io.ReadCloser
curOffset int64
m sync.Mutex
ImageInfo
}
// StreamHeader contains alternate data stream metadata.
type StreamHeader struct {
Name string
Hash SHA1Hash
Size int64
}
// Stream represents an alternate data stream or reparse point data stream.
type Stream struct {
StreamHeader
wim *Reader
offset resourceDescriptor
}
// FileHeader contains file metadata.
type FileHeader struct {
Name string
ShortName string
Attributes uint32
SecurityDescriptor []byte
CreationTime Filetime
LastAccessTime Filetime
LastWriteTime Filetime
Hash SHA1Hash
Size int64
LinkID int64
ReparseTag uint32
ReparseReserved uint32
}
// File represents a file or directory in a WIM image.
type File struct {
FileHeader
Streams []*Stream
offset resourceDescriptor
img *Image
subdirOffset int64
}
// NewReader returns a Reader that can be used to read WIM file data.
func NewReader(f io.ReaderAt) (*Reader, error) {
r := &Reader{r: f}
section := io.NewSectionReader(f, 0, 0xffff)
err := binary.Read(section, binary.LittleEndian, &r.hdr)
if err != nil {
return nil, err
}
if r.hdr.ImageTag != wimImageTag {
return nil, &ParseError{Oper: "image tag", Err: errors.New("not a WIM file")}
}
if r.hdr.Flags&^supportedHdrFlags != 0 {
return nil, fmt.Errorf("unsupported WIM flags %x", r.hdr.Flags&^supportedHdrFlags)
}
if r.hdr.CompressionSize != 0x8000 {
return nil, fmt.Errorf("unsupported compression size %d", r.hdr.CompressionSize)
}
if r.hdr.TotalParts != 1 {
return nil, errors.New("multi-part WIM not supported")
}
fileData, images, err := r.readOffsetTable(&r.hdr.OffsetTable)
if err != nil {
return nil, err
}
xmlinfo, err := r.readXML()
if err != nil {
return nil, err
}
var info info
err = xml.Unmarshal([]byte(xmlinfo), &info)
if err != nil {
return nil, &ParseError{Oper: "XML info", Err: err}
}
for i, img := range images {
for _, imgInfo := range info.Image {
if imgInfo.Index == i+1 {
img.ImageInfo = imgInfo
break
}
}
}
r.fileData = fileData
r.Image = images
r.XMLInfo = xmlinfo
return r, nil
}
// Close releases resources associated with the Reader.
func (r *Reader) Close() error {
for _, img := range r.Image {
img.reset()
}
return nil
}
func (r *Reader) resourceReader(hdr *resourceDescriptor) (io.ReadCloser, error) {
return r.resourceReaderWithOffset(hdr, 0)
}
func (r *Reader) resourceReaderWithOffset(hdr *resourceDescriptor, offset int64) (io.ReadCloser, error) {
var sr io.ReadCloser
section := io.NewSectionReader(r.r, hdr.Offset, hdr.CompressedSize())
if hdr.Flags()&resFlagCompressed == 0 {
section.Seek(offset, 0)
sr = ioutil.NopCloser(section)
} else {
cr, err := newCompressedReader(section, hdr.OriginalSize, offset)
if err != nil {
return nil, err
}
sr = cr
}
return sr, nil
}
func (r *Reader) readResource(hdr *resourceDescriptor) ([]byte, error) {
rsrc, err := r.resourceReader(hdr)
if err != nil {
return nil, err
}
defer rsrc.Close()
return ioutil.ReadAll(rsrc)
}
func (r *Reader) readXML() (string, error) {
if r.hdr.XMLData.CompressedSize() == 0 {
return "", nil
}
rsrc, err := r.resourceReader(&r.hdr.XMLData)
if err != nil {
return "", err
}
defer rsrc.Close()
XMLData := make([]uint16, r.hdr.XMLData.OriginalSize/2)
err = binary.Read(rsrc, binary.LittleEndian, XMLData)
if err != nil {
return "", &ParseError{Oper: "XML data", Err: err}
}
// The BOM will always indicate little-endian UTF-16.
if XMLData[0] != 0xfeff {
return "", &ParseError{Oper: "XML data", Err: errors.New("invalid BOM")}
}
return string(utf16.Decode(XMLData[1:])), nil
}
func (r *Reader) readOffsetTable(res *resourceDescriptor) (map[SHA1Hash]resourceDescriptor, []*Image, error) {
fileData := make(map[SHA1Hash]resourceDescriptor)
var images []*Image
offsetTable, err := r.readResource(res)
if err != nil {
return nil, nil, &ParseError{Oper: "offset table", Err: err}
}
br := bytes.NewReader(offsetTable)
for i := 0; ; i++ {
var res streamDescriptor
err := binary.Read(br, binary.LittleEndian, &res)
if err == io.EOF {
break
}
if err != nil {
return nil, nil, &ParseError{Oper: "offset table", Err: err}
}
if res.Flags()&^supportedResFlags != 0 {
return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("unsupported resource flag")}
}
// Validation for ad-hoc testing
if validate {
sec, err := r.resourceReader(&res.resourceDescriptor)
if err != nil {
panic(fmt.Sprint(i, err))
}
hash := sha1.New()
_, err = io.Copy(hash, sec)
sec.Close()
if err != nil {
panic(fmt.Sprint(i, err))
}
var cmphash SHA1Hash
copy(cmphash[:], hash.Sum(nil))
if cmphash != res.Hash {
panic(fmt.Sprint(i, "hash mismatch"))
}
}
if res.Flags()&resFlagMetadata != 0 {
image := &Image{
wim: r,
offset: res.resourceDescriptor,
}
images = append(images, image)
} else {
fileData[res.Hash] = res.resourceDescriptor
}
}
if len(images) != int(r.hdr.ImageCount) {
return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("mismatched image count")}
}
return fileData, images, nil
}
func (r *Reader) readSecurityDescriptors(rsrc io.Reader) (sds [][]byte, n int64, err error) {
var secBlock securityblockDisk
err = binary.Read(rsrc, binary.LittleEndian, &secBlock)
if err != nil {
err = &ParseError{Oper: "security table", Err: err}
return
}
n += securityblockDiskSize
secSizes := make([]int64, secBlock.NumEntries)
err = binary.Read(rsrc, binary.LittleEndian, &secSizes)
if err != nil {
err = &ParseError{Oper: "security table sizes", Err: err}
return
}
n += int64(secBlock.NumEntries * 8)
sds = make([][]byte, secBlock.NumEntries)
for i, size := range secSizes {
sd := make([]byte, size&0xffffffff)
_, err = io.ReadFull(rsrc, sd)
if err != nil {
err = &ParseError{Oper: "security descriptor", Err: err}
return
}
n += int64(len(sd))
sds[i] = sd
}
secsize := int64((secBlock.TotalLength + 7) &^ 7)
if n > secsize {
err = &ParseError{Oper: "security descriptor", Err: errors.New("security descriptor table too small")}
return
}
_, err = io.CopyN(ioutil.Discard, rsrc, secsize-n)
if err != nil {
return
}
n = secsize
return
}
// Open parses the image and returns the root directory.
func (img *Image) Open() (*File, error) {
if img.sds == nil {
rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, img.rootOffset)
if err != nil {
return nil, err
}
sds, n, err := img.wim.readSecurityDescriptors(rsrc)
if err != nil {
rsrc.Close()
return nil, err
}
img.sds = sds
img.r = rsrc
img.rootOffset = n
img.curOffset = n
}
f, err := img.readdir(img.rootOffset)
if err != nil {
return nil, err
}
if len(f) != 1 {
return nil, &ParseError{Oper: "root directory", Err: errors.New("expected exactly 1 root directory entry")}
}
return f[0], err
}
func (img *Image) reset() {
if img.r != nil {
img.r.Close()
img.r = nil
}
img.curOffset = -1
}
func (img *Image) readdir(offset int64) ([]*File, error) {
img.m.Lock()
defer img.m.Unlock()
if offset < img.curOffset || offset > img.curOffset+chunkSize {
// Reset to seek backward or to seek forward very far.
img.reset()
}
if img.r == nil {
rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, offset)
if err != nil {
return nil, err
}
img.r = rsrc
img.curOffset = offset
}
if offset > img.curOffset {
_, err := io.CopyN(ioutil.Discard, img.r, offset-img.curOffset)
if err != nil {
img.reset()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, err
}
}
var entries []*File
for {
e, n, err := img.readNextEntry(img.r)
img.curOffset += n
if err == io.EOF {
break
}
if err != nil {
img.reset()
return nil, err
}
entries = append(entries, e)
}
return entries, nil
}
func (img *Image) readNextEntry(r io.Reader) (*File, int64, error) {
var length int64
err := binary.Read(r, binary.LittleEndian, &length)
if err != nil {
return nil, 0, &ParseError{Oper: "directory length check", Err: err}
}
if length == 0 {
return nil, 8, io.EOF
}
left := length
if left < direntrySize {
return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short")}
}
var dentry direntry
err = binary.Read(r, binary.LittleEndian, &dentry)
if err != nil {
return nil, 0, &ParseError{Oper: "directory entry", Err: err}
}
left -= direntrySize
namesLen := int64(dentry.FileNameLength + 2 + dentry.ShortNameLength)
if left < namesLen {
return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short for names")}
}
names := make([]uint16, namesLen/2)
err = binary.Read(r, binary.LittleEndian, names)
if err != nil {
return nil, 0, &ParseError{Oper: "file name", Err: err}
}
left -= namesLen
var name, shortName string
if dentry.FileNameLength > 0 {
name = string(utf16.Decode(names[:dentry.FileNameLength/2]))
}
if dentry.ShortNameLength > 0 {
shortName = string(utf16.Decode(names[dentry.FileNameLength/2+1:]))
}
var offset resourceDescriptor
zerohash := SHA1Hash{}
if dentry.Hash != zerohash {
var ok bool
offset, ok = img.wim.fileData[dentry.Hash]
if !ok {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %#v", dentry)}
}
}
f := &File{
FileHeader: FileHeader{
Attributes: dentry.Attributes,
CreationTime: dentry.CreationTime,
LastAccessTime: dentry.LastAccessTime,
LastWriteTime: dentry.LastWriteTime,
Hash: dentry.Hash,
Size: offset.OriginalSize,
Name: name,
ShortName: shortName,
},
offset: offset,
img: img,
subdirOffset: dentry.SubdirOffset,
}
isDir := false
if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT == 0 {
f.LinkID = dentry.ReparseHardLink
if dentry.Attributes&FILE_ATTRIBUTE_DIRECTORY != 0 {
isDir = true
}
} else {
f.ReparseTag = uint32(dentry.ReparseHardLink)
f.ReparseReserved = uint32(dentry.ReparseHardLink >> 32)
}
if isDir && f.subdirOffset == 0 {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("no subdirectory data for directory")}
} else if !isDir && f.subdirOffset != 0 {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("unexpected subdirectory data for non-directory")}
}
if dentry.SecurityID != 0xffffffff {
f.SecurityDescriptor = img.sds[dentry.SecurityID]
}
_, err = io.CopyN(ioutil.Discard, r, left)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, 0, err
}
if dentry.StreamCount > 0 {
var streams []*Stream
for i := uint16(0); i < dentry.StreamCount; i++ {
s, n, err := img.readNextStream(r)
length += n
if err != nil {
return nil, 0, err
}
// The first unnamed stream should be treated as the file stream.
if i == 0 && s.Name == "" {
f.Hash = s.Hash
f.Size = s.Size
f.offset = s.offset
} else if s.Name != "" {
streams = append(streams, s)
}
}
f.Streams = streams
}
if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT != 0 && f.Size == 0 {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("reparse point is missing reparse stream")}
}
return f, length, nil
}
func (img *Image) readNextStream(r io.Reader) (*Stream, int64, error) {
var length int64
err := binary.Read(r, binary.LittleEndian, &length)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, 0, &ParseError{Oper: "stream length check", Err: err}
}
left := length
if left < streamentrySize {
return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short")}
}
var sentry streamentry
err = binary.Read(r, binary.LittleEndian, &sentry)
if err != nil {
return nil, 0, &ParseError{Oper: "stream entry", Err: err}
}
left -= streamentrySize
if left < int64(sentry.NameLength) {
return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short for name")}
}
names := make([]uint16, sentry.NameLength/2)
err = binary.Read(r, binary.LittleEndian, names)
if err != nil {
return nil, 0, &ParseError{Oper: "file name", Err: err}
}
left -= int64(sentry.NameLength)
name := string(utf16.Decode(names))
var offset resourceDescriptor
if sentry.Hash != (SHA1Hash{}) {
var ok bool
offset, ok = img.wim.fileData[sentry.Hash]
if !ok {
return nil, 0, &ParseError{Oper: "stream entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %v", sentry.Hash)}
}
}
s := &Stream{
StreamHeader: StreamHeader{
Hash: sentry.Hash,
Size: offset.OriginalSize,
Name: name,
},
wim: img.wim,
offset: offset,
}
_, err = io.CopyN(ioutil.Discard, r, left)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, 0, err
}
return s, length, nil
}
// Open returns an io.ReadCloser that can be used to read the stream's contents.
func (s *Stream) Open() (io.ReadCloser, error) {
return s.wim.resourceReader(&s.offset)
}
// Open returns an io.ReadCloser that can be used to read the file's contents.
func (f *File) Open() (io.ReadCloser, error) {
return f.img.wim.resourceReader(&f.offset)
}
// Readdir reads the directory entries.
func (f *File) Readdir() ([]*File, error) {
if !f.IsDir() {
return nil, errors.New("not a directory")
}
return f.img.readdir(f.subdirOffset)
}
// IsDir returns whether the given file is a directory. It returns false when it
// is a directory reparse point.
func (f *FileHeader) IsDir() bool {
return f.Attributes&(FILE_ATTRIBUTE_DIRECTORY|FILE_ATTRIBUTE_REPARSE_POINT) == FILE_ATTRIBUTE_DIRECTORY
}
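A short sketch (not part of this diff) tying together the exported API above: open a WIM, walk into the first image, and read one file's contents. The import path matches the vendored location; the package and helper names are made up for illustration.
package wimutil // hypothetical package for illustration
import (
	"io/ioutil"
	"os"
	"github.com/Microsoft/go-winio/wim"
)
// readFirstRegularFile returns the contents of the first non-directory entry
// in the root of the first image of the given WIM file.
func readFirstRegularFile(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	w, err := wim.NewReader(f)
	if err != nil {
		return nil, err
	}
	defer w.Close()
	if len(w.Image) == 0 {
		return nil, nil
	}
	root, err := w.Image[0].Open()
	if err != nil {
		return nil, err
	}
	entries, err := root.Readdir()
	if err != nil {
		return nil, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		rc, err := e.Open()
		if err != nil {
			return nil, err
		}
		defer rc.Close()
		return ioutil.ReadAll(rc)
	}
	return nil, nil
}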

View File

@@ -0,0 +1,520 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package winio
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procLocalFree = modkernel32.NewProc("LocalFree")
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procBackupRead = modkernel32.NewProc("BackupRead")
procBackupWrite = modkernel32.NewProc("BackupWrite")
)
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
newport = syscall.Handle(r0)
if newport == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func waitNamedPipe(name string, timeout uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _waitNamedPipe(_p0, timeout)
}
func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
ptr = uintptr(r0)
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName)
if err != nil {
return
}
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(str)
if err != nil {
return
}
return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
}
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func localFree(mem uintptr) {
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
return
}
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
len = uint32(r0)
return
}
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
var _p0 uint32
if releaseAll {
_p0 = 1
} else {
_p0 = 0
}
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
success = r0 != 0
if true {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func impersonateSelf(level uint32) (err error) {
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func revertToSelf() (err error) {
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
var _p0 uint32
if openAsSelf {
_p0 = 1
} else {
_p0 = 0
}
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getCurrentThread() (h syscall.Handle) {
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
h = syscall.Handle(r0)
return
}
func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
var _p1 *uint16
_p1, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _lookupPrivilegeValue(_p0, _p1, luid)
}
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeName(_p0, luid, buffer, size)
}
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
var _p0 *byte
if len(b) > 0 {
_p0 = &b[0]
}
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
var _p0 *byte
if len(b) > 0 {
_p0 = &b[0]
}
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}

1
vendor/github.com/Microsoft/hcsshim/.gitignore generated vendored Normal file
View File

@@ -0,0 +1 @@
*.exe

17
vendor/github.com/Microsoft/hcsshim/.gometalinter.json generated vendored Normal file
View File

@@ -0,0 +1,17 @@
{
"Vendor": true,
"Deadline": "2m",
"Sort": [
"linter",
"severity",
"path",
"line"
],
"Skip": [
"internal\\schema2"
],
"EnableGC": true,
"Enable": [
"gofmt"
]
}

21
vendor/github.com/Microsoft/hcsshim/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

41
vendor/github.com/Microsoft/hcsshim/README.md generated vendored Normal file
View File

@@ -0,0 +1,41 @@
# hcsshim
[![Build status](https://ci.appveyor.com/api/projects/status/nbcw28mnkqml0loa/branch/master?svg=true)](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master)
This package contains the Golang interface for using the Windows [Host Compute Service](https://blogs.technet.microsoft.com/virtualization/2017/01/27/introducing-the-host-compute-service-hcs/) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
## Dependencies
This project requires Golang 1.9 or newer to build.
For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).
## Reporting Security Issues
Security issues and bugs should be reported privately, via email, to the Microsoft Security
Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should
receive a response within 24 hours. If for some reason you do not, please follow up via
email to ensure we received your original message. Further information, including the
[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in
the [Security TechCenter](https://technet.microsoft.com/en-us/security/default).
For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet
---------------
Copyright (c) 2018 Microsoft Corp. All rights reserved.
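Since this vendored package is what the new container collector builds on, the following sketch (not part of this diff) shows the enumerate-and-sample flow it enables; the exact query fields and statistics field names are assumptions based on this vintage of the hcsshim API, not a definitive usage.
package main
import (
	"fmt"
	"github.com/Microsoft/hcsshim"
)
func main() {
	// List compute systems (containers) known to the Host Compute Service.
	containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
	if err != nil {
		panic(err)
	}
	for _, details := range containers {
		c, err := hcsshim.OpenContainer(details.ID)
		if err != nil {
			// The container may have exited between listing and opening.
			continue
		}
		stats, err := c.Statistics()
		c.Close()
		if err != nil {
			continue
		}
		fmt.Println(details.ID, stats.Memory.UsageCommitBytes)
	}
}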

29
vendor/github.com/Microsoft/hcsshim/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,29 @@
version: 0.1.{build}
image: Visual Studio 2017
clone_folder: c:\gopath\src\github.com\Microsoft\hcsshim
environment:
GOPATH: c:\gopath
PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH%
stack: go 1.11
build_script:
- appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip
- 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL
- gometalinter.exe --config .gometalinter.json ./...
- go build ./cmd/wclayer
- go build ./cmd/runhcs
- go build ./cmd/tar2ext4
- go test -v ./... -tags admin
- go test -c ./test/functional/ -tags functional
- go test -c ./test/runhcs/ -tags integration
artifacts:
- path: 'wclayer.exe'
- path: 'runhcs.exe'
- path: 'tar2ext4.exe'
- path: 'functional.test.exe'
- path: 'runhcs.test.exe'

191
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

22
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/NOTICE generated vendored Normal file
View File

@@ -0,0 +1,22 @@
runhcs is a fork of runc.
The following is runc's legal notice.
---
runc
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (http://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.

View File

@@ -0,0 +1,848 @@
package main
import (
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/cni"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/hcsoci"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/regstate"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
)
var errContainerStopped = errors.New("container is stopped")
type persistedState struct {
// ID is the id of this container/UVM.
ID string `json:",omitempty"`
// Owner is the owner value passed into the runhcs command and may be `""`.
Owner string `json:",omitempty"`
// SandboxID is the sandbox identifier passed in via OCI specifications. This
// can either be the sandbox itself or the sandbox this container should run
// in. See `parseSandboxAnnotations`.
SandboxID string `json:",omitempty"`
// HostID is the ID of the VM hosting this container. If a sandbox is used it will
// match the `SandboxID`.
HostID string `json:",omitempty"`
// Bundle is the folder path on disk where the container state and spec files
// reside.
Bundle string `json:",omitempty"`
Created time.Time `json:",omitempty"`
Rootfs string `json:",omitempty"`
// Spec holds the in-memory deserialized values found in `Bundle\config.json`.
Spec *specs.Spec `json:",omitempty"`
RequestedNetNS string `json:",omitempty"`
// IsHost is `true` when this is a VM isolated config.
IsHost bool `json:",omitempty"`
// UniqueID is a unique ID generated per container config.
UniqueID guid.GUID `json:",omitempty"`
// HostUniqueID is the unique ID of the hosting VM if this container is
// hosted.
HostUniqueID guid.GUID `json:",omitempty"`
}
type containerStatus string
const (
containerRunning containerStatus = "running"
containerStopped containerStatus = "stopped"
containerCreated containerStatus = "created"
containerPaused containerStatus = "paused"
containerUnknown containerStatus = "unknown"
keyState = "state"
keyResources = "resources"
keyShimPid = "shim"
keyInitPid = "pid"
keyNetNS = "netns"
// keyPidMapFmt is the format to use when mapping a host OS pid to a guest
// pid.
keyPidMapFmt = "pid-%d"
)
type container struct {
persistedState
ShimPid int
hc *hcs.System
resources *hcsoci.Resources
}
func startProcessShim(id, pidFile, logFile string, spec *specs.Process) (_ *os.Process, err error) {
// Ensure the stdio handles inherit to the child process. This isn't undone
// after the StartProcess call because the caller never launches another
// process before exiting.
for _, f := range []*os.File{os.Stdin, os.Stdout, os.Stderr} {
err = windows.SetHandleInformation(windows.Handle(f.Fd()), windows.HANDLE_FLAG_INHERIT, windows.HANDLE_FLAG_INHERIT)
if err != nil {
return nil, err
}
}
args := []string{
"--stdin", strconv.Itoa(int(os.Stdin.Fd())),
"--stdout", strconv.Itoa(int(os.Stdout.Fd())),
"--stderr", strconv.Itoa(int(os.Stderr.Fd())),
}
if spec != nil {
args = append(args, "--exec")
}
if strings.HasPrefix(logFile, runhcs.SafePipePrefix) {
args = append(args, "--log-pipe", logFile)
}
args = append(args, id)
return launchShim("shim", pidFile, logFile, args, spec)
}
func launchShim(cmd, pidFile, logFile string, args []string, data interface{}) (_ *os.Process, err error) {
executable, err := os.Executable()
if err != nil {
return nil, err
}
// Create a pipe to use as stderr for the shim process. This is used to
// retrieve early error information, up to the point that the shim is ready
// to launch a process in the container.
rp, wp, err := os.Pipe()
if err != nil {
return nil, err
}
defer rp.Close()
defer wp.Close()
// Create a pipe to send the data, if one is provided.
var rdatap, wdatap *os.File
if data != nil {
rdatap, wdatap, err = os.Pipe()
if err != nil {
return nil, err
}
defer rdatap.Close()
defer wdatap.Close()
}
var log *os.File
fullargs := []string{os.Args[0]}
if logFile != "" {
if !strings.HasPrefix(logFile, runhcs.SafePipePrefix) {
log, err = os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666)
if err != nil {
return nil, err
}
defer log.Close()
}
fullargs = append(fullargs, "--log-format", logFormat)
if logrus.GetLevel() == logrus.DebugLevel {
fullargs = append(fullargs, "--debug")
}
}
fullargs = append(fullargs, cmd)
fullargs = append(fullargs, args...)
attr := &os.ProcAttr{
Files: []*os.File{rdatap, wp, log},
}
p, err := os.StartProcess(executable, fullargs, attr)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
p.Kill()
}
}()
wp.Close()
// Write the data if provided.
if data != nil {
rdatap.Close()
dataj, err := json.Marshal(data)
if err != nil {
return nil, err
}
_, err = wdatap.Write(dataj)
if err != nil {
return nil, err
}
wdatap.Close()
}
err = runhcs.GetErrorFromPipe(rp, p)
if err != nil {
return nil, err
}
if pidFile != "" {
if err = createPidFile(pidFile, p.Pid); err != nil {
return nil, err
}
}
return p, nil
}
// parseSandboxAnnotations searches `a` for various annotations used by
// different runtimes to represent a sandbox ID, and sandbox type.
//
// If found returns the tuple `(sandboxID, isSandbox)` where `isSandbox == true`
// indicates the identifier is the sandbox itself; `isSandbox == false` indicates
// the identifier is the sandbox in which to place this container. Otherwise
// returns `("", false)`.
func parseSandboxAnnotations(a map[string]string) (string, bool) {
var t, id string
if t = a["io.kubernetes.cri.container-type"]; t != "" {
id = a["io.kubernetes.cri.sandbox-id"]
} else if t = a["io.kubernetes.cri-o.ContainerType"]; t != "" {
id = a["io.kubernetes.cri-o.SandboxID"]
} else if t = a["io.kubernetes.docker.type"]; t != "" {
id = a["io.kubernetes.sandbox.id"]
if t == "podsandbox" {
t = "sandbox"
}
}
if t == "container" {
return id, false
}
if t == "sandbox" {
return id, true
}
return "", false
}
// parseAnnotationsBool searches `a` for `key` and if found verifies that the
// value is `true` or `false` in any case. If `key` is not found returns `def`.
func parseAnnotationsBool(a map[string]string, key string, def bool) bool {
if v, ok := a[key]; ok {
switch strings.ToLower(v) {
case "true":
return true
case "false":
return false
default:
logrus.WithFields(logrus.Fields{
logfields.OCIAnnotation: key,
logfields.Value: v,
logfields.ExpectedType: logfields.Bool,
}).Warning("annotation could not be parsed")
}
}
return def
}
// parseAnnotationsCPU searches `s.Annotations` for the CPU annotation. If
// not found searches `s` for the Windows CPU section. If neither are found
// returns `def`.
func parseAnnotationsCPU(s *specs.Spec, annotation string, def int32) int32 {
if m := parseAnnotationsUint64(s.Annotations, annotation, 0); m != 0 {
return int32(m)
}
if s.Windows != nil &&
s.Windows.Resources != nil &&
s.Windows.Resources.CPU != nil &&
s.Windows.Resources.CPU.Count != nil &&
*s.Windows.Resources.CPU.Count > 0 {
return int32(*s.Windows.Resources.CPU.Count)
}
return def
}
// parseAnnotationsMemory searches `s.Annotations` for the memory annotation. If
// not found searches `s` for the Windows memory section. If neither are found
// returns `def`.
func parseAnnotationsMemory(s *specs.Spec, annotation string, def int32) int32 {
if m := parseAnnotationsUint64(s.Annotations, annotation, 0); m != 0 {
return int32(m)
}
if s.Windows != nil &&
s.Windows.Resources != nil &&
s.Windows.Resources.Memory != nil &&
s.Windows.Resources.Memory.Limit != nil &&
*s.Windows.Resources.Memory.Limit > 0 {
return int32(*s.Windows.Resources.Memory.Limit)
}
return def
}
// parseAnnotationsPreferredRootFSType searches `a` for `key` and verifies that the
// value is in the set of allowed values. If `key` is not found returns `def`.
func parseAnnotationsPreferredRootFSType(a map[string]string, key string, def uvm.PreferredRootFSType) uvm.PreferredRootFSType {
if v, ok := a[key]; ok {
switch v {
case "initrd":
return uvm.PreferredRootFSTypeInitRd
case "vhd":
return uvm.PreferredRootFSTypeVHD
default:
logrus.Warningf("annotation: '%s', with value: '%s' must be 'initrd' or 'vhd'", key, v)
}
}
return def
}
// parseAnnotationsUint32 searches `a` for `key` and if found verifies that the
// value is a 32 bit unsigned integer. If `key` is not found returns `def`.
func parseAnnotationsUint32(a map[string]string, key string, def uint32) uint32 {
if v, ok := a[key]; ok {
countu, err := strconv.ParseUint(v, 10, 32)
if err == nil {
v := uint32(countu)
return v
}
logrus.WithFields(logrus.Fields{
logfields.OCIAnnotation: key,
logfields.Value: v,
logfields.ExpectedType: logfields.Uint32,
logrus.ErrorKey: err,
}).Warning("annotation could not be parsed")
}
return def
}
// parseAnnotationsUint64 searches `a` for `key` and if found verifies that the
// value is a 64 bit unsigned integer. If `key` is not found returns `def`.
func parseAnnotationsUint64(a map[string]string, key string, def uint64) uint64 {
if v, ok := a[key]; ok {
countu, err := strconv.ParseUint(v, 10, 64)
if err == nil {
return countu
}
logrus.WithFields(logrus.Fields{
logfields.OCIAnnotation: key,
logfields.Value: v,
logfields.ExpectedType: logfields.Uint64,
logrus.ErrorKey: err,
}).Warning("annotation could not be parsed")
}
return def
}
// startVMShim starts a vm-shim command with the specified `opts`. `opts` can be `uvm.OptionsWCOW` or `uvm.OptionsLCOW`
func (c *container) startVMShim(logFile string, opts interface{}) (*os.Process, error) {
var os string
if _, ok := opts.(*uvm.OptionsLCOW); ok {
os = "linux"
} else {
os = "windows"
}
args := []string{"--os", os}
if strings.HasPrefix(logFile, runhcs.SafePipePrefix) {
args = append(args, "--log-pipe", logFile)
}
args = append(args, c.VMPipePath())
return launchShim("vmshim", "", logFile, args, opts)
}
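// containerConfig collects the CLI-supplied parameters needed to create a
// container and, when required, its hosting VM.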
type containerConfig struct {
ID string
Owner string
HostID string
PidFile string
ShimLogFile, VMLogFile string
Spec *specs.Spec
VMConsolePipe string
}
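// createContainer validates the sandbox and host relationships implied by the
// spec annotations, persists the initial state to the registry, starts a
// hosting VM when one is needed, creates the compute system (directly or via
// the VM shim), and finally launches the container's shim process.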
func createContainer(cfg *containerConfig) (_ *container, err error) {
// Store the container information in a volatile registry key.
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
vmisolated := cfg.Spec.Linux != nil || (cfg.Spec.Windows != nil && cfg.Spec.Windows.HyperV != nil)
sandboxID, isSandbox := parseSandboxAnnotations(cfg.Spec.Annotations)
hostID := cfg.HostID
if isSandbox {
if sandboxID != cfg.ID {
return nil, errors.New("sandbox ID must match ID")
}
} else if sandboxID != "" {
// Validate that the sandbox container exists.
sandbox, err := getContainer(sandboxID, false)
if err != nil {
return nil, err
}
defer sandbox.Close()
if sandbox.SandboxID != sandboxID {
return nil, fmt.Errorf("container %s is not a sandbox", sandboxID)
}
if hostID == "" {
// Use the sandbox's host.
hostID = sandbox.HostID
} else if sandbox.HostID == "" {
return nil, fmt.Errorf("sandbox container %s is not running in a VM host, but host %s was specified", sandboxID, hostID)
} else if hostID != sandbox.HostID {
return nil, fmt.Errorf("sandbox container %s has a different host %s from the requested host %s", sandboxID, sandbox.HostID, hostID)
}
if vmisolated && hostID == "" {
return nil, fmt.Errorf("container %s is not a VM isolated sandbox", sandboxID)
}
}
uniqueID := guid.New()
newvm := false
var hostUniqueID guid.GUID
if hostID != "" {
host, err := getContainer(hostID, false)
if err != nil {
return nil, err
}
defer host.Close()
if !host.IsHost {
return nil, fmt.Errorf("host container %s is not a VM host", hostID)
}
hostUniqueID = host.UniqueID
} else if vmisolated && (isSandbox || cfg.Spec.Linux != nil || osversion.Get().Build >= osversion.RS5) {
// This handles all LCOW, Pod Sandbox, and Windows Xenon V2 (RS5+) cases.
hostID = cfg.ID
newvm = true
hostUniqueID = uniqueID
}
// Make absolute the paths in Root.Path and Windows.LayerFolders.
rootfs := ""
if cfg.Spec.Root != nil {
rootfs = cfg.Spec.Root.Path
if rootfs != "" && !filepath.IsAbs(rootfs) && !strings.HasPrefix(rootfs, `\\?\`) {
rootfs = filepath.Join(cwd, rootfs)
cfg.Spec.Root.Path = rootfs
}
}
netNS := ""
if cfg.Spec.Windows != nil {
for i, f := range cfg.Spec.Windows.LayerFolders {
if !filepath.IsAbs(f) && !strings.HasPrefix(rootfs, `\\?\`) {
cfg.Spec.Windows.LayerFolders[i] = filepath.Join(cwd, f)
}
}
// Determine the network namespace to use.
if cfg.Spec.Windows.Network != nil {
if cfg.Spec.Windows.Network.NetworkSharedContainerName != "" {
// RS4 case
err = stateKey.Get(cfg.Spec.Windows.Network.NetworkSharedContainerName, keyNetNS, &netNS)
if err != nil {
if _, ok := err.(*regstate.NoStateError); !ok {
return nil, err
}
}
} else if cfg.Spec.Windows.Network.NetworkNamespace != "" {
// RS5 case
netNS = cfg.Spec.Windows.Network.NetworkNamespace
}
}
}
// Store the initial container state in the registry so that the delete
// command can clean everything up if something goes wrong.
c := &container{
persistedState: persistedState{
ID: cfg.ID,
Owner: cfg.Owner,
Bundle: cwd,
Rootfs: rootfs,
Created: time.Now(),
Spec: cfg.Spec,
SandboxID: sandboxID,
HostID: hostID,
IsHost: newvm,
RequestedNetNS: netNS,
UniqueID: uniqueID,
HostUniqueID: hostUniqueID,
},
}
err = stateKey.Create(cfg.ID, keyState, &c.persistedState)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
c.Remove()
}
}()
if isSandbox && vmisolated {
cnicfg := cni.NewPersistedNamespaceConfig(netNS, cfg.ID, hostUniqueID)
err = cnicfg.Store()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
cnicfg.Remove()
}
}()
}
// Start a VM if necessary.
if newvm {
var opts interface{}
const (
annotationAllowOvercommit = "io.microsoft.virtualmachine.computetopology.memory.allowovercommit"
annotationEnableDeferredCommit = "io.microsoft.virtualmachine.computetopology.memory.enabledeferredcommit"
annotationMemorySizeInMB = "io.microsoft.virtualmachine.computetopology.memory.sizeinmb"
annotationProcessorCount = "io.microsoft.virtualmachine.computetopology.processor.count"
annotationVPMemCount = "io.microsoft.virtualmachine.devices.virtualpmem.maximumcount"
annotationVPMemSize = "io.microsoft.virtualmachine.devices.virtualpmem.maximumsizebytes"
annotationPreferredRootFSType = "io.microsoft.virtualmachine.lcow.preferredrootfstype"
)
if cfg.Spec.Linux != nil {
lopts := uvm.NewDefaultOptionsLCOW(vmID(c.ID), cfg.Owner)
lopts.MemorySizeInMB = parseAnnotationsMemory(cfg.Spec, annotationMemorySizeInMB, lopts.MemorySizeInMB)
lopts.AllowOvercommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationAllowOvercommit, lopts.AllowOvercommit)
lopts.EnableDeferredCommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationEnableDeferredCommit, lopts.EnableDeferredCommit)
lopts.ProcessorCount = parseAnnotationsCPU(cfg.Spec, annotationProcessorCount, lopts.ProcessorCount)
lopts.ConsolePipe = cfg.VMConsolePipe
lopts.VPMemDeviceCount = parseAnnotationsUint32(cfg.Spec.Annotations, annotationVPMemCount, lopts.VPMemDeviceCount)
lopts.VPMemSizeBytes = parseAnnotationsUint64(cfg.Spec.Annotations, annotationVPMemSize, lopts.VPMemSizeBytes)
lopts.PreferredRootFSType = parseAnnotationsPreferredRootFSType(cfg.Spec.Annotations, annotationPreferredRootFSType, lopts.PreferredRootFSType)
switch lopts.PreferredRootFSType {
case uvm.PreferredRootFSTypeInitRd:
lopts.RootFSFile = uvm.InitrdFile
case uvm.PreferredRootFSTypeVHD:
lopts.RootFSFile = uvm.VhdFile
}
opts = lopts
} else {
wopts := uvm.NewDefaultOptionsWCOW(vmID(c.ID), cfg.Owner)
wopts.MemorySizeInMB = parseAnnotationsMemory(cfg.Spec, annotationMemorySizeInMB, wopts.MemorySizeInMB)
wopts.AllowOvercommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationAllowOvercommit, wopts.AllowOvercommit)
wopts.EnableDeferredCommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationEnableDeferredCommit, wopts.EnableDeferredCommit)
wopts.ProcessorCount = parseAnnotationsCPU(cfg.Spec, annotationProcessorCount, wopts.ProcessorCount)
// In order for the UVM sandbox.vhdx not to collide with the actual
// nested Argon sandbox.vhdx we append the \vm folder to the last entry
// in the list.
layersLen := len(cfg.Spec.Windows.LayerFolders)
layers := make([]string, layersLen)
copy(layers, cfg.Spec.Windows.LayerFolders)
vmPath := filepath.Join(layers[layersLen-1], "vm")
err := os.MkdirAll(vmPath, 0)
if err != nil {
return nil, err
}
layers[layersLen-1] = vmPath
wopts.LayerFolders = layers
opts = wopts
}
shim, err := c.startVMShim(cfg.VMLogFile, opts)
if err != nil {
return nil, err
}
shim.Release()
}
if c.HostID != "" {
// Call to the VM shim process to create the container. This is done so
// that the VM process can keep track of the VM's virtual hardware
// resource use.
err = c.issueVMRequest(runhcs.OpCreateContainer)
if err != nil {
return nil, err
}
c.hc, err = hcs.OpenComputeSystem(cfg.ID)
if err != nil {
return nil, err
}
} else {
// Create the container directly from this process.
err = createContainerInHost(c, nil)
if err != nil {
return nil, err
}
}
// Create the shim process for the container.
err = startContainerShim(c, cfg.PidFile, cfg.ShimLogFile)
if err != nil {
if e := c.Kill(); e == nil {
c.Remove()
}
return nil, err
}
return c, nil
}
func (c *container) ShimPipePath() string {
return runhcs.SafePipePath("runhcs-shim-" + c.UniqueID.String())
}
func (c *container) VMPipePath() string {
return runhcs.VMPipePath(c.HostUniqueID)
}
func (c *container) VMIsolated() bool {
return c.HostID != ""
}
func (c *container) unmountInHost(vm *uvm.UtilityVM, all bool) error {
resources := &hcsoci.Resources{}
err := stateKey.Get(c.ID, keyResources, resources)
if _, ok := err.(*regstate.NoStateError); ok {
return nil
}
if err != nil {
return err
}
err = hcsoci.ReleaseResources(resources, vm, all)
if err != nil {
stateKey.Set(c.ID, keyResources, resources)
return err
}
err = stateKey.Clear(c.ID, keyResources)
if err != nil {
return err
}
return nil
}
func (c *container) Unmount(all bool) error {
if c.VMIsolated() {
op := runhcs.OpUnmountContainerDiskOnly
if all {
op = runhcs.OpUnmountContainer
}
err := c.issueVMRequest(op)
if err != nil {
if _, ok := err.(*noVMError); ok {
logrus.WithFields(logrus.Fields{
logfields.ContainerID: c.ID,
logfields.UVMID: c.HostID,
logrus.ErrorKey: errors.New("failed to unmount container resources"),
}).Warning("VM shim could not be contacted")
} else {
return err
}
}
} else {
c.unmountInHost(nil, false)
}
return nil
}
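// createContainerInHost creates, but does not start, the compute system for c,
// either directly on the host or inside the given utility VM, and records the
// allocated resources and network namespace in the registry.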
func createContainerInHost(c *container, vm *uvm.UtilityVM) (err error) {
if c.hc != nil {
return errors.New("container already created")
}
// Create the container without starting it.
opts := &hcsoci.CreateOptions{
ID: c.ID,
Owner: c.Owner,
Spec: c.Spec,
HostingSystem: vm,
NetworkNamespace: c.RequestedNetNS,
}
vmid := ""
if vm != nil {
vmid = vm.ID()
}
logrus.WithFields(logrus.Fields{
logfields.ContainerID: c.ID,
logfields.UVMID: vmid,
}).Info("creating container in UVM")
hc, resources, err := hcsoci.CreateContainer(opts)
if err != nil {
return err
}
defer func() {
if err != nil {
hc.Terminate()
hc.Wait()
hcsoci.ReleaseResources(resources, vm, true)
}
}()
// Record the network namespace to support namespace sharing by container ID.
if resources.NetNS() != "" {
err = stateKey.Set(c.ID, keyNetNS, resources.NetNS())
if err != nil {
return err
}
}
err = stateKey.Set(c.ID, keyResources, resources)
if err != nil {
return err
}
c.hc = hc
return nil
}
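// startContainerShim launches the shim process that will later execute the
// container's init process, recording its pid in the registry and in the
// optional pid file.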
func startContainerShim(c *container, pidFile, logFile string) error {
// Launch a shim process to later execute a process in the container.
shim, err := startProcessShim(c.ID, pidFile, logFile, nil)
if err != nil {
return err
}
defer shim.Release()
defer func() {
if err != nil {
shim.Kill()
}
}()
c.ShimPid = shim.Pid
err = stateKey.Set(c.ID, keyShimPid, shim.Pid)
if err != nil {
return err
}
if pidFile != "" {
if err = createPidFile(pidFile, shim.Pid); err != nil {
return err
}
}
return nil
}
func (c *container) Close() error {
if c.hc == nil {
return nil
}
return c.hc.Close()
}
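// Exec starts the compute system and, if the spec defines an init process,
// dials the shim's named pipe so the waiting shim launches that process.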
func (c *container) Exec() error {
err := c.hc.Start()
if err != nil {
return err
}
if c.Spec.Process == nil {
return nil
}
// Alert the shim that the container is ready.
pipe, err := winio.DialPipe(c.ShimPipePath(), nil)
if err != nil {
return err
}
defer pipe.Close()
shim, err := os.FindProcess(c.ShimPid)
if err != nil {
return err
}
defer shim.Release()
err = runhcs.GetErrorFromPipe(pipe, shim)
if err != nil {
return err
}
return nil
}
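// getContainer loads a container's persisted state and shim pid from the
// registry and reattaches to its compute system if it still exists. With
// notStopped set, a stopped container is reported as errContainerStopped.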
func getContainer(id string, notStopped bool) (*container, error) {
var c container
err := stateKey.Get(id, keyState, &c.persistedState)
if err != nil {
return nil, err
}
err = stateKey.Get(id, keyShimPid, &c.ShimPid)
if err != nil {
if _, ok := err.(*regstate.NoStateError); !ok {
return nil, err
}
c.ShimPid = -1
}
if notStopped && c.ShimPid == 0 {
return nil, errContainerStopped
}
hc, err := hcs.OpenComputeSystem(c.ID)
if err == nil {
c.hc = hc
} else if !hcs.IsNotExist(err) {
return nil, err
} else if notStopped {
return nil, errContainerStopped
}
return &c, nil
}
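// Remove releases the container's layers and mapped resources, tears down the
// hosting VM if this container owns it, and deletes the registry state.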
func (c *container) Remove() error {
// Unmount any layers or mapped volumes.
err := c.Unmount(!c.IsHost)
if err != nil {
return err
}
// Follow kata's example and delay tearing down the VM until the owning
// container is removed.
if c.IsHost {
vm, err := hcs.OpenComputeSystem(vmID(c.ID))
if err == nil {
if err := vm.Terminate(); hcs.IsPending(err) {
vm.Wait()
}
}
}
return stateKey.Remove(c.ID)
}
func (c *container) Kill() error {
if c.hc == nil {
return nil
}
err := c.hc.Terminate()
if hcs.IsPending(err) {
err = c.hc.Wait()
}
if hcs.IsAlreadyStopped(err) {
err = nil
}
return err
}
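// Kill terminates the compute system, waiting for a pending termination to
// complete and treating an already-stopped container as success.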
func (c *container) Status() (containerStatus, error) {
if c.hc == nil || c.ShimPid == 0 {
return containerStopped, nil
}
props, err := c.hc.Properties()
if err != nil {
if !strings.Contains(err.Error(), "operation is not valid in the current state") {
return "", err
}
return containerUnknown, nil
}
state := containerUnknown
switch props.State {
case "", "Created":
state = containerCreated
case "Running":
state = containerRunning
case "Paused":
state = containerPaused
case "Stopped":
state = containerStopped
}
return state, nil
}


@@ -0,0 +1,76 @@
package main
import (
"os"
"path/filepath"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/lcow"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
gcsclient "github.com/Microsoft/opengcs/client"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
var createScratchCommand = cli.Command{
Name: "create-scratch",
Usage: "creates a scratch vhdx at 'destpath' that is ext4 formatted",
Description: "Creates a scratch vhdx at 'destpath' that is ext4 formatted",
Flags: []cli.Flag{
cli.StringFlag{
Name: "destpath",
Usage: "Required: describes the destination vhd path",
},
},
Before: appargs.Validate(),
Action: func(context *cli.Context) error {
dest := context.String("destpath")
if dest == "" {
return errors.New("'destpath' is required")
}
// If we only have v1 LCOW support, do it the old way.
if osversion.Get().Build < osversion.RS5 {
cfg := gcsclient.Config{
Options: gcsclient.Options{
KirdPath: filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers"),
KernelFile: "kernel",
InitrdFile: uvm.InitrdFile,
},
Name: "createscratch-uvm",
UvmTimeoutSeconds: 5 * 60, // 5 Min
}
if err := cfg.StartUtilityVM(); err != nil {
return errors.Wrapf(err, "failed to start '%s'", cfg.Name)
}
defer cfg.Uvm.Terminate()
if err := cfg.CreateExt4Vhdx(dest, lcow.DefaultScratchSizeGB, ""); err != nil {
return errors.Wrapf(err, "failed to create ext4vhdx for '%s'", cfg.Name)
}
} else {
opts := uvm.NewDefaultOptionsLCOW("createscratch-uvm", context.GlobalString("owner"))
// 256MB with boot from vhd supported.
opts.MemorySizeInMB = 256
opts.VPMemDeviceCount = 1
convertUVM, err := uvm.CreateLCOW(opts)
if err != nil {
return errors.Wrapf(err, "failed to create '%s'", opts.ID)
}
defer convertUVM.Close()
if err := convertUVM.Start(); err != nil {
return errors.Wrapf(err, "failed to start '%s'", opts.ID)
}
if err := lcow.CreateScratch(convertUVM, dest, lcow.DefaultScratchSizeGB, "", ""); err != nil {
return errors.Wrapf(err, "failed to create ext4vhdx for '%s'", opts.ID)
}
}
return nil
},
}


@@ -0,0 +1,100 @@
package main
import (
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
var createRunFlags = []cli.Flag{
cli.StringFlag{
Name: "bundle, b",
Value: "",
Usage: `path to the root of the bundle directory, defaults to the current directory`,
},
cli.StringFlag{
Name: "pid-file",
Value: "",
Usage: "specify the file to write the process id to",
},
cli.StringFlag{
Name: "shim-log",
Value: "",
Usage: `path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-<container-id>-shim-log) for the launched shim process`,
},
cli.StringFlag{
Name: "vm-log",
Value: "",
Usage: `path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-<container-id>-vm-log) for the launched VM shim process`,
},
cli.StringFlag{
Name: "vm-console",
Value: "",
Usage: `path to the pipe for the VM's console (e.g. \\.\pipe\debugpipe)`,
},
cli.StringFlag{
Name: "host",
Value: "",
Usage: "host container whose VM this container should run in",
},
}
var createCommand = cli.Command{
Name: "create",
Usage: "create a container",
ArgsUsage: `<container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host.`,
Description: `The create command creates an instance of a container for a bundle. The bundle
is a directory with a specification file named "` + specConfig + `" and a root
filesystem.
The specification file includes an args parameter. The args parameter is used
to specify command(s) that get run when the container is started. To change the
command(s) that get executed on start, edit the args parameter of the spec. See
"runc spec --help" for more explanation.`,
Flags: append(createRunFlags),
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
cfg, err := containerConfigFromContext(context)
if err != nil {
return err
}
_, err = createContainer(cfg)
if err != nil {
return err
}
return nil
},
}
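// containerConfigFromContext assembles a containerConfig from the command-line
// flags and the bundle's spec file.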
func containerConfigFromContext(context *cli.Context) (*containerConfig, error) {
id := context.Args().First()
pidFile, err := absPathOrEmpty(context.String("pid-file"))
if err != nil {
return nil, err
}
shimLog, err := absPathOrEmpty(context.String("shim-log"))
if err != nil {
return nil, err
}
vmLog, err := absPathOrEmpty(context.String("vm-log"))
if err != nil {
return nil, err
}
spec, err := setupSpec(context)
if err != nil {
return nil, err
}
return &containerConfig{
ID: id,
Owner: context.GlobalString("owner"),
PidFile: pidFile,
ShimLogFile: shimLog,
VMLogFile: vmLog,
VMConsolePipe: context.String("vm-console"),
Spec: spec,
HostID: context.String("host"),
}, nil
}


@@ -0,0 +1,73 @@
package main
import (
"fmt"
"os"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/regstate"
"github.com/urfave/cli"
)
var deleteCommand = cli.Command{
Name: "delete",
Usage: "delete any resources held by the container often used with detached container",
ArgsUsage: `<container-id>
Where "<container-id>" is the name for the instance of the container.
EXAMPLE:
For example, if the container id is "ubuntu01" and runhcs list currently shows the
status of "ubuntu01" as "stopped" the following will delete resources held for
"ubuntu01" removing "ubuntu01" from the runhcs list of containers:
# runhcs delete ubuntu01`,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "force, f",
Usage: "Forcibly deletes the container if it is still running (uses SIGKILL)",
},
},
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
id := context.Args().First()
force := context.Bool("force")
container, err := getContainer(id, false)
if err != nil {
if _, ok := err.(*regstate.NoStateError); ok {
if e := stateKey.Remove(id); e != nil {
fmt.Fprintf(os.Stderr, "remove %s: %v\n", id, e)
}
if force {
return nil
}
}
return err
}
defer container.Close()
s, err := container.Status()
if err != nil {
return err
}
kill := false
switch s {
case containerStopped:
case containerCreated:
kill = true
default:
if !force {
return fmt.Errorf("cannot delete container %s that is not stopped: %s\n", id, s)
}
kill = true
}
if kill {
err = container.Kill()
if err != nil {
return err
}
}
return container.Remove()
},
}

160
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/exec.go generated vendored Normal file

@@ -0,0 +1,160 @@
package main
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/urfave/cli"
)
var execCommand = cli.Command{
Name: "exec",
Usage: "execute new process inside the container",
ArgsUsage: `<container-id> <command> [command options] || -p process.json <container-id>
Where "<container-id>" is the name for the instance of the container and
"<command>" is the command to be executed in the container.
"<command>" can't be empty unless a "-p" flag provided.
EXAMPLE:
For example, if the container is configured to run the linux ps command the
following will output a list of processes running in the container:
# runhcs exec <container-id> ps`,
Flags: []cli.Flag{
cli.StringFlag{
Name: "cwd",
Usage: "current working directory in the container",
},
cli.StringSliceFlag{
Name: "env, e",
Usage: "set environment variables",
},
cli.BoolFlag{
Name: "tty, t",
Usage: "allocate a pseudo-TTY",
},
cli.StringFlag{
Name: "user, u",
},
cli.StringFlag{
Name: "process, p",
Usage: "path to the process.json",
},
cli.BoolFlag{
Name: "detach,d",
Usage: "detach from the container's process",
},
cli.StringFlag{
Name: "pid-file",
Value: "",
Usage: "specify the file to write the process id to",
},
cli.StringFlag{
Name: "shim-log",
Value: "",
Usage: `path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-<container-id>-<exec-id>-log) for the launched shim process`,
},
},
Before: appargs.Validate(argID, appargs.Rest(appargs.String)),
Action: func(context *cli.Context) error {
id := context.Args().First()
pidFile, err := absPathOrEmpty(context.String("pid-file"))
if err != nil {
return err
}
shimLog, err := absPathOrEmpty(context.String("shim-log"))
if err != nil {
return err
}
c, err := getContainer(id, false)
if err != nil {
return err
}
defer c.Close()
status, err := c.Status()
if err != nil {
return err
}
if status != containerRunning {
return errContainerStopped
}
spec, err := getProcessSpec(context, c)
if err != nil {
return err
}
p, err := startProcessShim(id, pidFile, shimLog, spec)
if err != nil {
return err
}
if !context.Bool("detach") {
state, err := p.Wait()
if err != nil {
return err
}
os.Exit(int(state.Sys().(syscall.WaitStatus).ExitCode))
}
return nil
},
SkipArgReorder: true,
}
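// getProcessSpec builds the process spec for exec, either from a process.json
// file (the -p flag) or from the container's spec combined with command-line
// overrides.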
func getProcessSpec(context *cli.Context, c *container) (*specs.Process, error) {
if path := context.String("process"); path != "" {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var p specs.Process
if err := json.NewDecoder(f).Decode(&p); err != nil {
return nil, err
}
return &p, validateProcessSpec(&p)
}
// process via cli flags
p := c.Spec.Process
if len(context.Args()) == 1 {
return nil, fmt.Errorf("process args cannot be empty")
}
p.Args = context.Args()[1:]
// override the cwd, if passed
if context.String("cwd") != "" {
p.Cwd = context.String("cwd")
}
// append the passed env variables
p.Env = append(p.Env, context.StringSlice("env")...)
// set the tty
if context.IsSet("tty") {
p.Terminal = context.Bool("tty")
}
// override the user, if passed
if context.String("user") != "" {
p.User.Username = context.String("user")
}
return p, nil
}
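// validateProcessSpec checks that the process spec has a non-empty absolute
// working directory and at least one argument.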
func validateProcessSpec(spec *specs.Process) error {
if spec.Cwd == "" {
return fmt.Errorf("Cwd property must not be empty")
}
// IsAbs doesn't recognize Unix paths on Windows builds, so handle that case
// here.
if !filepath.IsAbs(spec.Cwd) && !strings.HasPrefix(spec.Cwd, "/") {
return fmt.Errorf("Cwd must be an absolute path")
}
if len(spec.Args) == 0 {
return fmt.Errorf("args must not be empty")
}
return nil
}

193
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/kill.go generated vendored Normal file

@@ -0,0 +1,193 @@
package main
import (
"strconv"
"strings"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/osversion"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
var killCommand = cli.Command{
Name: "kill",
Usage: "kill sends the specified signal (default: SIGTERM) to the container's init process",
ArgsUsage: `<container-id> [signal]
Where "<container-id>" is the name for the instance of the container and
"[signal]" is the signal to be sent to the init process.
EXAMPLE:
For example, if the container id is "ubuntu01" the following will send a "KILL"
signal to the init process of the "ubuntu01" container:
# runhcs kill ubuntu01 KILL`,
Flags: []cli.Flag{},
Before: appargs.Validate(argID, appargs.Optional(appargs.String)),
Action: func(context *cli.Context) error {
id := context.Args().First()
c, err := getContainer(id, true)
if err != nil {
return err
}
defer c.Close()
status, err := c.Status()
if err != nil {
return err
}
if status != containerRunning {
return errContainerStopped
}
signalsSupported := false
// The Signal feature was added in RS5
if osversion.Get().Build >= osversion.RS5 {
if c.IsHost || c.HostID != "" {
var hostID string
if c.IsHost {
// This is the LCOW, Pod Sandbox, or Windows Xenon V2 for RS5+
hostID = vmID(c.ID)
} else {
// This is the Nth container in a Pod
hostID = c.HostID
}
uvm, err := hcs.OpenComputeSystem(hostID)
if err != nil {
return err
}
defer uvm.Close()
if props, err := uvm.Properties(schema1.PropertyTypeGuestConnection); err == nil &&
props.GuestConnectionInfo.GuestDefinedCapabilities.SignalProcessSupported {
signalsSupported = true
}
} else if c.Spec.Linux == nil && c.Spec.Windows.HyperV == nil {
// RS5+ Windows Argon
signalsSupported = true
}
}
signal := 0
if signalsSupported {
signal, err = validateSigstr(context.Args().Get(1), signalsSupported, c.Spec.Linux != nil)
if err != nil {
return err
}
}
var pid int
if err := stateKey.Get(id, keyInitPid, &pid); err != nil {
return err
}
p, err := c.hc.OpenProcess(pid)
if err != nil {
return err
}
defer p.Close()
if signalsSupported && (c.Spec.Linux != nil || !c.Spec.Process.Terminal) {
opts := guestrequest.SignalProcessOptions{
Signal: signal,
}
return p.Signal(opts)
}
// Legacy signal handling: issue a kill.
return p.Kill()
},
}
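// validateSigstr converts a signal name or number from the command line into
// the value to send. When the guest lacks signal support only a small set of
// known signals is accepted and 0 is returned; otherwise the LCOW or Windows
// signal map is consulted, with Docker's UNIX-style TERM and KILL translated
// to their Windows control events.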
func validateSigstr(sigstr string, signalsSupported bool, isLcow bool) (int, error) {
errInvalidSignal := errors.Errorf("invalid signal '%s'", sigstr)
// All flavors, including legacy, default to SIGTERM on LCOW and CTRLC on Windows.
if sigstr == "" {
if isLcow {
return 0xf, nil
}
return 0, nil
}
sigstr = strings.ToUpper(sigstr)
if !signalsSupported {
// If signals aren't supported we just validate that it's a known signal.
// We return 0 because only a platform Kill() was supported at that
// time.
if isLcow {
switch sigstr {
case "15":
fallthrough
case "TERM":
fallthrough
case "SIGTERM":
return 0, nil
default:
return 0, errInvalidSignal
}
}
switch sigstr {
// Docker sends a UNIX TERM even on Windows, so accept it alongside the supported Windows signal names.
case "15":
fallthrough
case "TERM":
fallthrough
case "0":
fallthrough
case "CTRLC":
return 0, nil
case "9":
fallthrough
case "KILL":
return 0, nil
default:
return 0, errInvalidSignal
}
} else {
if !isLcow {
// Docker sends the UNIX signal name or value. Convert them to the
// correct Windows signals.
switch sigstr {
case "15":
fallthrough
case "TERM":
return 0x0, nil // Convert to CTRLC
case "9":
fallthrough
case "KILL":
return 0x6, nil // Convert to CTRLSHUTDOWN
}
}
}
var sigmap map[string]int
if isLcow {
sigmap = signalMapLcow
} else {
sigmap = signalMapWindows
}
signal, err := strconv.Atoi(sigstr)
if err != nil {
// Signal might still match the string value
for k, v := range sigmap {
if k == sigstr {
return v, nil
}
}
return 0, errInvalidSignal
}
// Match signal by value
for _, v := range sigmap {
if signal == v {
return signal, nil
}
}
return 0, errInvalidSignal
}


@@ -0,0 +1,111 @@
package main
import (
"fmt"
"strconv"
"strings"
"testing"
)
func runValidateSigstrTest(sigstr string, signalsSupported, isLcow bool,
expectedSignal int, expectedError bool, t *testing.T) {
signal, err := validateSigstr(sigstr, signalsSupported, isLcow)
if expectedError {
if err == nil {
t.Fatalf("Expected err: %v, got: nil", expectedError)
} else if err.Error() != fmt.Sprintf("invalid signal '%s'", sigstr) {
t.Fatalf("Expected err: %v, got: %v", expectedError, err)
}
}
if signal != expectedSignal {
t.Fatalf("Test - Signal: %s, Support: %v, LCOW: %v\nExpected signal: %v, got: %v",
sigstr, signalsSupported, isLcow,
expectedSignal, signal)
}
}
func Test_ValidateSigstr_Empty(t *testing.T) {
runValidateSigstrTest("", false, false, 0, false, t)
runValidateSigstrTest("", false, true, 0xf, false, t)
runValidateSigstrTest("", true, false, 0, false, t)
runValidateSigstrTest("", true, true, 0xf, false, t)
}
func Test_ValidateSigstr_LCOW_NoSignalSupport_Default(t *testing.T) {
runValidateSigstrTest("15", false, true, 0, false, t)
runValidateSigstrTest("TERM", false, true, 0, false, t)
runValidateSigstrTest("SIGTERM", false, true, 0, false, t)
}
func Test_ValidateSigstr_LCOW_NoSignalSupport_Default_Invalid(t *testing.T) {
runValidateSigstrTest("2", false, true, 0, true, t)
runValidateSigstrTest("test", false, true, 0, true, t)
}
func Test_ValidateSigstr_WCOW_NoSignalSupport_Default(t *testing.T) {
runValidateSigstrTest("15", false, false, 0, false, t)
runValidateSigstrTest("TERM", false, false, 0, false, t)
runValidateSigstrTest("0", false, false, 0, false, t)
runValidateSigstrTest("CTRLC", false, false, 0, false, t)
runValidateSigstrTest("9", false, false, 0, false, t)
runValidateSigstrTest("KILL", false, false, 0, false, t)
}
func Test_ValidateSigstr_WCOW_NoSignalSupport_Default_Invalid(t *testing.T) {
runValidateSigstrTest("2", false, false, 0, true, t)
runValidateSigstrTest("test", false, false, 0, true, t)
}
func Test_ValidateSigstr_LCOW_SignalSupport_SignalNames(t *testing.T) {
for k, v := range signalMapLcow {
runValidateSigstrTest(k, true, true, v, false, t)
// run it again with a case not in the map
lc := strings.ToLower(k)
if k == lc {
t.Fatalf("Expected lower casing - map: %v, got: %v", k, lc)
}
runValidateSigstrTest(lc, true, true, v, false, t)
}
}
func Test_ValidateSigstr_WCOW_SignalSupport_SignalNames(t *testing.T) {
for k, v := range signalMapWindows {
runValidateSigstrTest(k, true, false, v, false, t)
// run it again with a case not in the map
lc := strings.ToLower(k)
if k == lc {
t.Fatalf("Expected lower casing - map: %v, got: %v", k, lc)
}
runValidateSigstrTest(lc, true, false, v, false, t)
}
}
func Test_ValidateSigstr_LCOW_SignalSupport_SignalValues(t *testing.T) {
for _, v := range signalMapLcow {
str := strconv.Itoa(v)
runValidateSigstrTest(str, true, true, v, false, t)
}
}
func Test_ValidateSigstr_WCOW_SignalSupport_SignalValues(t *testing.T) {
for _, v := range signalMapWindows {
str := strconv.Itoa(v)
runValidateSigstrTest(str, true, false, v, false, t)
}
}
func Test_ValidateSigstr_WCOW_SignalSupport_Docker_SignalNames(t *testing.T) {
// Docker KILL -> CTRLSHUTDOWN when signals are supported
runValidateSigstrTest("KILL", true, false, 0x6, false, t)
// Docker TERM -> CTRLC when signals are supported
runValidateSigstrTest("TERM", true, false, 0x0, false, t)
}
func Test_ValidateSigstr_WCOW_SignalSupport_Docker_SignalValues(t *testing.T) {
// Docker KILL -> CTRLSHUTDOWN when signals are supported
runValidateSigstrTest("9", true, false, 0x6, false, t)
// Docker TERM -> CTRLC when signals are supported
runValidateSigstrTest("15", true, false, 0x0, false, t)
}

116
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/list.go generated vendored Normal file

@@ -0,0 +1,116 @@
package main
import (
"fmt"
"os"
"text/tabwriter"
"time"
"encoding/json"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/urfave/cli"
)
const formatOptions = `table or json`
var listCommand = cli.Command{
Name: "list",
Usage: "lists containers started by runhcs with the given root",
ArgsUsage: `
Where the given root is specified via the global option "--root"
(default: "/run/runhcs").
EXAMPLE 1:
To list containers created via the default "--root":
# runhcs list
EXAMPLE 2:
To list containers created using a non-default value for "--root":
# runhcs --root value list`,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format, f",
Value: "table",
Usage: `select one of: ` + formatOptions,
},
cli.BoolFlag{
Name: "quiet, q",
Usage: "display only container IDs",
},
},
Before: appargs.Validate(),
Action: func(context *cli.Context) error {
s, err := getContainers(context)
if err != nil {
return err
}
if context.Bool("quiet") {
for _, item := range s {
fmt.Println(item.ID)
}
return nil
}
switch context.String("format") {
case "table":
w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0)
fmt.Fprint(w, "ID\tPID\tSTATUS\tBUNDLE\tCREATED\tOWNER\n")
for _, item := range s {
fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\n",
item.ID,
item.InitProcessPid,
item.Status,
item.Bundle,
item.Created.Format(time.RFC3339Nano),
item.Owner)
}
if err := w.Flush(); err != nil {
return err
}
case "json":
if err := json.NewEncoder(os.Stdout).Encode(s); err != nil {
return err
}
default:
return fmt.Errorf("invalid format option")
}
return nil
},
}
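// getContainers enumerates every container id stored under the configured
// registry root and returns its current state, skipping entries whose state
// cannot be read.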
func getContainers(context *cli.Context) ([]runhcs.ContainerState, error) {
ids, err := stateKey.Enumerate()
if err != nil {
return nil, err
}
var s []runhcs.ContainerState
for _, id := range ids {
c, err := getContainer(id, false)
if err != nil {
fmt.Fprintf(os.Stderr, "reading state for %s: %v\n", id, err)
continue
}
status, err := c.Status()
if err != nil {
fmt.Fprintf(os.Stderr, "reading status for %s: %v\n", id, err)
}
s = append(s, runhcs.ContainerState{
ID: id,
Version: c.Spec.Version,
InitProcessPid: c.ShimPid,
Status: string(status),
Bundle: c.Bundle,
Rootfs: c.Rootfs,
Created: c.Created,
Annotations: c.Spec.Annotations,
})
c.Close()
}
return s, nil
}

174
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/main.go generated vendored Normal file

@@ -0,0 +1,174 @@
package main
import (
"fmt"
"io"
"os"
"strings"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/pkg/etwlogrus"
"github.com/Microsoft/hcsshim/internal/regstate"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// Add a manifest to get proper Windows version detection.
//
// goversioninfo can be installed with "go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo"
//go:generate goversioninfo -platform-specific
// version will be populated by the Makefile, read from
// VERSION file of the source code.
var version = ""
// gitCommit will be the hash that the binary was built from
// and will be populated by the Makefile
var gitCommit = ""
var stateKey *regstate.Key
var logFormat string
const (
specConfig = "config.json"
usage = `Open Container Initiative runtime for Windows
runhcs is a fork of runc, modified to run containers on Windows with or without Hyper-V isolation. Like runc, it is a command line client for running applications packaged according to the Open Container Initiative (OCI) format.
runhcs integrates with existing process supervisors to provide a production container runtime environment for applications. It can be used with your existing process monitoring tools and the container will be spawned as a direct child of the process supervisor.
Containers are configured using bundles. A bundle for a container is a directory that includes a specification file named "` + specConfig + `". Bundle contents will depend on the container type.
To start a new instance of a container:
# runhcs run [ -b bundle ] <container-id>
Where "<container-id>" is your name for the instance of the container that you are starting. The name you provide for the container instance must be unique on your host. Providing the bundle directory using "-b" is optional. The default value for "bundle" is the current directory.`
)
func main() {
// Provider ID: 0b52781f-b24d-5685-ddf6-69830ed40ec3
// Hook isn't closed explicitly, as it will exist until process exit.
if hook, err := etwlogrus.NewHook("Microsoft.Virtualization.RunHCS"); err == nil {
logrus.AddHook(hook)
} else {
logrus.Error(err)
}
app := cli.NewApp()
app.Name = "runhcs"
app.Usage = usage
var v []string
if version != "" {
v = append(v, version)
}
if gitCommit != "" {
v = append(v, fmt.Sprintf("commit: %s", gitCommit))
}
v = append(v, fmt.Sprintf("spec: %s", specs.Version))
app.Version = strings.Join(v, "\n")
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output for logging",
},
cli.StringFlag{
Name: "log",
Value: "nul",
Usage: `set the log file path or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-log) where internal debug information is written`,
},
cli.StringFlag{
Name: "log-format",
Value: "text",
Usage: "set the format used by logs ('text' (default), or 'json')",
},
cli.StringFlag{
Name: "owner",
Value: "runhcs",
Usage: "compute system owner",
},
cli.StringFlag{
Name: "root",
Value: "default",
Usage: "registry key for storage of container state",
},
}
app.Commands = []cli.Command{
createCommand,
createScratchCommand,
deleteCommand,
// eventsCommand,
execCommand,
killCommand,
listCommand,
pauseCommand,
psCommand,
resizeTtyCommand,
resumeCommand,
runCommand,
shimCommand,
startCommand,
stateCommand,
// updateCommand,
vmshimCommand,
}
app.Before = func(context *cli.Context) error {
if context.GlobalBool("debug") {
logrus.SetLevel(logrus.DebugLevel)
}
if path := context.GlobalString("log"); path != "" {
var f io.Writer
var err error
if strings.HasPrefix(path, runhcs.SafePipePrefix) {
f, err = winio.DialPipe(path, nil)
} else {
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666)
}
if err != nil {
return err
}
logrus.SetOutput(f)
}
switch logFormat = context.GlobalString("log-format"); logFormat {
case "text":
// retain logrus's default.
case "json":
logrus.SetFormatter(new(logrus.JSONFormatter))
default:
return fmt.Errorf("unknown log-format %q", logFormat)
}
var err error
stateKey, err = regstate.Open(context.GlobalString("root"), false)
if err != nil {
return err
}
return nil
}
// If the command returns an error, cli takes upon itself to print
// the error on cli.ErrWriter and exit.
// Use our own writer here to ensure the log gets sent to the right location.
fatalWriter.Writer = cli.ErrWriter
cli.ErrWriter = &fatalWriter
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(cli.ErrWriter, err)
os.Exit(1)
}
}
type logErrorWriter struct {
Writer io.Writer
}
var fatalWriter logErrorWriter
func (f *logErrorWriter) Write(p []byte) (n int, err error) {
logrus.Error(string(p))
return f.Writer.Write(p)
}


@@ -0,0 +1,58 @@
package main
import (
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
var pauseCommand = cli.Command{
Name: "pause",
Usage: "pause suspends all processes inside the container",
ArgsUsage: `<container-id>
Where "<container-id>" is the name for the instance of the container to be
paused. `,
Description: `The pause command suspends all processes in the instance of the container.
Use runhcs list to identify instances of containers and their current status.`,
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
id := context.Args().First()
container, err := getContainer(id, true)
if err != nil {
return err
}
defer container.Close()
if err := container.hc.Pause(); err != nil {
return err
}
return nil
},
}
var resumeCommand = cli.Command{
Name: "resume",
Usage: "resumes all processes that have been previously paused",
ArgsUsage: `<container-id>
Where "<container-id>" is the name for the instance of the container to be
resumed.`,
Description: `The resume command resumes all processes in the instance of the container.
Use runhcs list to identify instances of containers and their current status.`,
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
id := context.Args().First()
container, err := getContainer(id, true)
if err != nil {
return err
}
defer container.Close()
if err := container.hc.Resume(); err != nil {
return err
}
return nil
},
}

51
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/ps.go generated vendored Normal file

@@ -0,0 +1,51 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/urfave/cli"
)
var psCommand = cli.Command{
Name: "ps",
Usage: "ps displays the processes running inside a container",
ArgsUsage: `<container-id> [ps options]`,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format, f",
Value: "json",
Usage: `select one of: ` + formatOptions,
},
},
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
id := context.Args().First()
container, err := getContainer(id, true)
if err != nil {
return err
}
defer container.Close()
props, err := container.hc.Properties(schema1.PropertyTypeProcessList)
if err != nil {
return err
}
var pids []int
for _, p := range props.ProcessList {
pids = append(pids, int(p.ProcessId))
}
switch context.String("format") {
case "json":
return json.NewEncoder(os.Stdout).Encode(pids)
default:
return fmt.Errorf("invalid format option")
}
},
SkipArgReorder: true,
}

Binary file not shown.

Binary file not shown.

64
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/run.go generated vendored Normal file

@@ -0,0 +1,64 @@
package main
import (
"os"
"syscall"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
// default action is to start a container
var runCommand = cli.Command{
Name: "run",
Usage: "create and run a container",
ArgsUsage: `<container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host.`,
Description: `The run command creates an instance of a container for a bundle. The bundle
is a directory with a specification file named "` + specConfig + `" and a root
filesystem.
The specification file includes an args parameter. The args parameter is used
to specify command(s) that get run when the container is started. To change the
command(s) that get executed on start, edit the args parameter of the spec.`,
Flags: append(createRunFlags,
cli.BoolFlag{
Name: "detach, d",
Usage: "detach from the container's process",
},
),
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
cfg, err := containerConfigFromContext(context)
if err != nil {
return err
}
c, err := createContainer(cfg)
if err != nil {
return err
}
p, err := os.FindProcess(c.ShimPid)
if err != nil {
return err
}
err = c.Exec()
if err != nil {
return err
}
if !context.Bool("detach") {
state, err := p.Wait()
if err != nil {
return err
}
c.Remove()
os.Exit(int(state.Sys().(syscall.WaitStatus).ExitCode))
}
return nil
},
}


@@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<description>runhcs</description>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
</assembly>

323
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/shim.go generated vendored Normal file

@@ -0,0 +1,323 @@
package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strings"
"sync"
"time"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/lcow"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/Microsoft/hcsshim/internal/schema2"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"golang.org/x/sys/windows"
)
func containerPipePath(id string) string {
return runhcs.SafePipePath("runhcs-shim-" + id)
}
func newFile(context *cli.Context, param string) *os.File {
fd := uintptr(context.Int(param))
if fd == 0 {
return nil
}
return os.NewFile(fd, "")
}
var shimCommand = cli.Command{
Name: "shim",
Usage: `launch the process and proxy stdio (do not call it outside of runhcs)`,
Hidden: true,
Flags: []cli.Flag{
&cli.IntFlag{Name: "stdin", Hidden: true},
&cli.IntFlag{Name: "stdout", Hidden: true},
&cli.IntFlag{Name: "stderr", Hidden: true},
&cli.BoolFlag{Name: "exec", Hidden: true},
cli.StringFlag{Name: "log-pipe", Hidden: true},
},
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
logPipe := context.String("log-pipe")
if logPipe != "" {
lpc, err := winio.DialPipe(logPipe, nil)
if err != nil {
return err
}
defer lpc.Close()
logrus.SetOutput(lpc)
} else {
logrus.SetOutput(os.Stderr)
}
fatalWriter.Writer = os.Stdout
id := context.Args().First()
c, err := getContainer(id, true)
if err != nil {
return err
}
defer c.Close()
// Asynchronously wait for the container to exit.
containerExitCh := make(chan error)
go func() {
containerExitCh <- c.hc.WaitExpectedError(hcs.ErrAlreadyClosed)
}()
// Get File objects for the open stdio files passed in as arguments.
stdin := newFile(context, "stdin")
stdout := newFile(context, "stdout")
stderr := newFile(context, "stderr")
exec := context.Bool("exec")
terminateOnFailure := false
errorOut := io.WriteCloser(os.Stdout)
var spec *specs.Process
if exec {
// Read the process spec from stdin.
specj, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
os.Stdin.Close()
spec = new(specs.Process)
err = json.Unmarshal(specj, spec)
if err != nil {
return err
}
} else {
// Stdin is not used.
os.Stdin.Close()
// Listen on the named pipe associated with this container.
l, err := winio.ListenPipe(c.ShimPipePath(), nil)
if err != nil {
return err
}
// Alert the parent process that initialization has completed
// successfully.
errorOut.Write(runhcs.ShimSuccess)
errorOut.Close()
fatalWriter.Writer = ioutil.Discard
// When this process exits, clear this process's pid in the registry.
defer func() {
stateKey.Set(id, keyShimPid, 0)
}()
defer func() {
if terminateOnFailure {
if err = c.hc.Terminate(); hcs.IsPending(err) {
<-containerExitCh
}
}
}()
terminateOnFailure = true
// Wait for a connection to the named pipe, exiting if the container
// exits before this happens.
var pipe net.Conn
pipeCh := make(chan error)
go func() {
var err error
pipe, err = l.Accept()
pipeCh <- err
}()
select {
case err = <-pipeCh:
if err != nil {
return err
}
case err = <-containerExitCh:
if err != nil {
return err
}
return cli.NewExitError("", 1)
}
// The next set of errors goes to the open pipe connection.
errorOut = pipe
fatalWriter.Writer = pipe
// The process spec comes from the original container spec.
spec = c.Spec.Process
}
// Create the process in the container.
var wpp *hcsschema.ProcessParameters // Windows Process Parameters
var lpp *lcow.ProcessParameters // Linux Process Parameters
var p *hcs.Process
if c.Spec.Linux == nil {
environment := make(map[string]string)
for _, v := range spec.Env {
s := strings.SplitN(v, "=", 2)
if len(s) == 2 && len(s[1]) > 0 {
environment[s[0]] = s[1]
}
}
wpp = &hcsschema.ProcessParameters{
WorkingDirectory: spec.Cwd,
EmulateConsole: spec.Terminal,
Environment: environment,
User: spec.User.Username,
}
for i, arg := range spec.Args {
e := windows.EscapeArg(arg)
if i == 0 {
wpp.CommandLine = e
} else {
wpp.CommandLine += " " + e
}
}
if spec.ConsoleSize != nil {
wpp.ConsoleSize = []int32{
int32(spec.ConsoleSize.Height),
int32(spec.ConsoleSize.Width),
}
}
wpp.CreateStdInPipe = stdin != nil
wpp.CreateStdOutPipe = stdout != nil
wpp.CreateStdErrPipe = stderr != nil
p, err = c.hc.CreateProcess(wpp)
} else {
lpp = &lcow.ProcessParameters{}
if exec {
lpp.OCIProcess = spec
}
lpp.CreateStdInPipe = stdin != nil
lpp.CreateStdOutPipe = stdout != nil
lpp.CreateStdErrPipe = stderr != nil
p, err = c.hc.CreateProcess(lpp)
}
if err != nil {
return err
}
cstdin, cstdout, cstderr, err := p.Stdio()
if err != nil {
return err
}
if !exec {
err = stateKey.Set(c.ID, keyInitPid, p.Pid())
if err != nil {
return err
}
}
// Store the Guest pid map
err = stateKey.Set(c.ID, fmt.Sprintf(keyPidMapFmt, os.Getpid()), p.Pid())
if err != nil {
return err
}
defer func() {
// Remove the Guest pid map when this process is cleaned up
stateKey.Clear(c.ID, fmt.Sprintf(keyPidMapFmt, os.Getpid()))
}()
terminateOnFailure = false
// Alert the connected process that the process was launched
// successfully.
errorOut.Write(runhcs.ShimSuccess)
errorOut.Close()
fatalWriter.Writer = ioutil.Discard
// Relay stdio.
var wg sync.WaitGroup
if cstdin != nil {
go func() {
io.Copy(cstdin, stdin)
cstdin.Close()
p.CloseStdin()
}()
}
if cstdout != nil {
wg.Add(1)
go func() {
io.Copy(stdout, cstdout)
stdout.Close()
cstdout.Close()
wg.Done()
}()
}
if cstderr != nil {
wg.Add(1)
go func() {
io.Copy(stderr, cstderr)
stderr.Close()
cstderr.Close()
wg.Done()
}()
}
err = p.Wait()
wg.Wait()
// Attempt to get the exit code from the process.
code := 1
if err == nil {
code, err = p.ExitCode()
if err != nil {
code = 1
}
}
if !exec {
// Shut down the container, waiting 5 minutes before terminating it
// forcefully.
const shutdownTimeout = time.Minute * 5
waited := false
err = c.hc.Shutdown()
if hcs.IsPending(err) {
select {
case err = <-containerExitCh:
waited = true
case <-time.After(shutdownTimeout):
err = hcs.ErrTimeout
}
}
if hcs.IsAlreadyStopped(err) {
err = nil
}
if err != nil {
err = c.hc.Terminate()
if waited {
err = c.hc.Wait()
} else {
err = <-containerExitCh
}
}
}
return cli.NewExitError("", code)
},
}


@@ -0,0 +1,46 @@
package main
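// signalMapLcow maps Linux signal names to their numeric values for LCOW
// containers.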
var signalMapLcow = map[string]int{
"ABRT": 0x6,
"ALRM": 0xe,
"BUS": 0x7,
"CHLD": 0x11,
"CLD": 0x11,
"CONT": 0x12,
"FPE": 0x8,
"HUP": 0x1,
"ILL": 0x4,
"INT": 0x2,
"IO": 0x1d,
"IOT": 0x6,
"KILL": 0x9,
"PIPE": 0xd,
"POLL": 0x1d,
"PROF": 0x1b,
"PWR": 0x1e,
"QUIT": 0x3,
"SEGV": 0xb,
"STKFLT": 0x10,
"STOP": 0x13,
"SYS": 0x1f,
"TERM": 0xf,
"TRAP": 0x5,
"TSTP": 0x14,
"TTIN": 0x15,
"TTOU": 0x16,
"URG": 0x17,
"USR1": 0xa,
"USR2": 0xc,
"VTALRM": 0x1a,
"WINCH": 0x1c,
"XCPU": 0x18,
"XFSZ": 0x19,
}
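// signalMapWindows maps Windows pseudo-signal names to the control event
// values delivered to the container.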
var signalMapWindows = map[string]int{
"CTRLC": 0x0,
"CTRLBREAK": 0x1,
"CTRLCLOSE": 0x2,
"CTRLLOGOFF": 0x5,
"CTRLSHUTDOWN": 0x6,
}

42
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/spec.go generated vendored Normal file

@@ -0,0 +1,42 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/urfave/cli"
)
// loadSpec loads the specification from the provided path.
func loadSpec(cPath string) (spec *specs.Spec, err error) {
cf, err := os.Open(cPath)
if err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("JSON specification file %s not found", cPath)
}
return nil, err
}
defer cf.Close()
if err = json.NewDecoder(cf).Decode(&spec); err != nil {
return nil, err
}
return spec, nil
}
// setupSpec performs initial setup based on the cli.Context for the container
func setupSpec(context *cli.Context) (*specs.Spec, error) {
bundle := context.String("bundle")
if bundle != "" {
if err := os.Chdir(bundle); err != nil {
return nil, err
}
}
spec, err := loadSpec(specConfig)
if err != nil {
return nil, err
}
return spec, nil
}


@@ -0,0 +1,43 @@
package main
import (
"errors"
"fmt"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
var startCommand = cli.Command{
Name: "start",
Usage: "executes the user defined process in a created container",
ArgsUsage: `<container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host.`,
Description: `The start command executes the user defined process in a created container.`,
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
id := context.Args().First()
container, err := getContainer(id, false)
if err != nil {
return err
}
defer container.Close()
status, err := container.Status()
if err != nil {
return err
}
switch status {
case containerCreated:
return container.Exec()
case containerStopped:
return errors.New("cannot start a container that has stopped")
case containerRunning:
return errors.New("cannot start an already running container")
default:
return fmt.Errorf("cannot start a container in the '%s' state", status)
}
},
}


@@ -0,0 +1,49 @@
package main
import (
"encoding/json"
"os"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/urfave/cli"
)
var stateCommand = cli.Command{
Name: "state",
Usage: "output the state of a container",
ArgsUsage: `<container-id>
Where "<container-id>" is your name for the instance of the container.`,
Description: `The state command outputs current state information for the
instance of a container.`,
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
id := context.Args().First()
c, err := getContainer(id, false)
if err != nil {
return err
}
defer c.Close()
status, err := c.Status()
if err != nil {
return err
}
cs := runhcs.ContainerState{
Version: c.Spec.Version,
ID: c.ID,
InitProcessPid: c.ShimPid,
Status: string(status),
Bundle: c.Bundle,
Rootfs: c.Rootfs,
Created: c.Created,
Annotations: c.Spec.Annotations,
}
data, err := json.MarshalIndent(cs, "", " ")
if err != nil {
return err
}
os.Stdout.Write(data)
return nil
},
}

56
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/tty.go generated vendored Normal file

@@ -0,0 +1,56 @@
package main
import (
"fmt"
"strconv"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
var resizeTtyCommand = cli.Command{
Name: "resize-tty",
Usage: "resize-tty updates the terminal size for a container process",
ArgsUsage: `<container-id> <width> <height>`,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "pid, p",
Usage: "the process pid (defaults to init pid)",
},
},
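// Beyond the container id, the width and height arguments are validated as
// base-10 integers in the uint16 range (1-65535).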
Before: appargs.Validate(
argID,
appargs.Int(10, 1, 65535),
appargs.Int(10, 1, 65535),
),
Action: func(context *cli.Context) error {
id := context.Args()[0]
width, _ := strconv.ParseUint(context.Args()[1], 10, 16)
height, _ := strconv.ParseUint(context.Args()[2], 10, 16)
c, err := getContainer(id, true)
if err != nil {
return err
}
defer c.Close()
pid := context.Int("pid")
if pid == 0 {
if err := stateKey.Get(id, keyInitPid, &pid); err != nil {
return err
}
} else {
// If a pid was provided, map it to its hcs pid.
if err := stateKey.Get(id, fmt.Sprintf(keyPidMapFmt, pid), &pid); err != nil {
return err
}
}
p, err := c.hc.OpenProcess(pid)
if err != nil {
return err
}
defer p.Close()
return p.ResizeConsole(uint16(width), uint16(height))
},
}


@@ -0,0 +1,52 @@
package main
import (
"fmt"
"net"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/runhcs"
)
var argID = appargs.NonEmptyString
func absPathOrEmpty(path string) (string, error) {
if path == "" {
return "", nil
}
if strings.HasPrefix(path, runhcs.SafePipePrefix) {
if len(path) > len(runhcs.SafePipePrefix) {
return runhcs.SafePipePath(path[len(runhcs.SafePipePrefix):]), nil
}
}
return filepath.Abs(path)
}
// createPidFile atomically creates a file containing the process's pid:
// it first writes a temp file named after the target path's filename with a '.' in front of it,
// then renames that file into place
func createPidFile(path string, pid int) error {
var (
tmpDir = filepath.Dir(path)
tmpName = filepath.Join(tmpDir, fmt.Sprintf(".%s", filepath.Base(path)))
)
f, err := os.OpenFile(tmpName, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666)
if err != nil {
return err
}
_, err = fmt.Fprintf(f, "%d", pid)
f.Close()
if err != nil {
return err
}
return os.Rename(tmpName, path)
}
func closeWritePipe(pipe net.Conn) error {
return pipe.(interface {
CloseWrite() error
}).CloseWrite()
}
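
The temp-file-plus-rename pattern in createPidFile above is what makes the pid file appear atomically: the data becomes visible at the final path only once the rename succeeds. A minimal self-contained sketch of the same pattern (the helper name is hypothetical, not part of this package):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeFileAtomic mirrors createPidFile above: write to a dot-prefixed
// temp file in the same directory, then rename it over the target so the
// final path never holds a partially written file.
func writeFileAtomic(path string, data []byte) error {
	tmp := filepath.Join(filepath.Dir(path), "."+filepath.Base(path))
	f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666)
	if err != nil {
		return err
	}
	_, err = f.Write(data)
	f.Close()
	if err != nil {
		os.Remove(tmp)
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	if err := writeFileAtomic("demo.pid", []byte(fmt.Sprintf("%d", os.Getpid()))); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}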


@@ -0,0 +1,39 @@
package main
import (
"os"
"testing"
"github.com/Microsoft/hcsshim/internal/runhcs"
)
func Test_AbsPathOrEmpty(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
t.Fatalf("failed to get test wd: %v", err)
}
tests := []string{
"",
runhcs.SafePipePrefix + "test",
runhcs.SafePipePrefix + "test with spaces",
"test",
"C:\\test..\\test",
}
expected := []string{
"",
runhcs.SafePipePrefix + "test",
runhcs.SafePipePrefix + "test%20with%20spaces",
wd + "\\test",
"C:\\test..\\test",
}
for i, test := range tests {
actual, err := absPathOrEmpty(test)
if err != nil {
t.Fatalf("absPathOrEmpty: error '%v'", err)
}
if actual != expected[i] {
t.Fatalf("absPathOrEmpty: actual '%s' != '%s'", actual, expected[i])
}
}
}


@@ -0,0 +1,43 @@
{
"FixedFileInfo": {
"FileVersion": {
"Major": 1,
"Minor": 0,
"Patch": 0,
"Build": 0
},
"ProductVersion": {
"Major": 1,
"Minor": 0,
"Patch": 0,
"Build": 0
},
"FileFlagsMask": "3f",
"FileFlags ": "00",
"FileOS": "040004",
"FileType": "01",
"FileSubType": "00"
},
"StringFileInfo": {
"Comments": "",
"CompanyName": "",
"FileDescription": "",
"FileVersion": "",
"InternalName": "",
"LegalCopyright": "",
"LegalTrademarks": "",
"OriginalFilename": "",
"PrivateBuild": "",
"ProductName": "",
"ProductVersion": "v1.0.0.0",
"SpecialBuild": ""
},
"VarFileInfo": {
"Translation": {
"LangID": "0409",
"CharsetID": "04B0"
}
},
"IconPath": "",
"ManifestPath": "runhcs.exe.manifest"
}

209
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/vm.go generated vendored Normal file

@@ -0,0 +1,209 @@
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"syscall"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
func vmID(id string) string {
return id + "@vm"
}
var vmshimCommand = cli.Command{
Name: "vmshim",
Usage: `launch a VM and containers inside it (do not call it outside of runhcs)`,
Hidden: true,
Flags: []cli.Flag{
cli.StringFlag{Name: "log-pipe", Hidden: true},
cli.StringFlag{Name: "os", Hidden: true},
},
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
logPipe := context.String("log-pipe")
if logPipe != "" {
lpc, err := winio.DialPipe(logPipe, nil)
if err != nil {
return err
}
defer lpc.Close()
logrus.SetOutput(lpc)
} else {
logrus.SetOutput(os.Stderr)
}
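// Until the VM is up, report fatal errors back to the parent over stdout;
// once startup succeeds this is redirected to ioutil.Discard below.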
fatalWriter.Writer = os.Stdout
pipePath := context.Args().First()
optsj, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
os.Stdin.Close()
var opts interface{}
isLCOW := context.String("os") == "linux"
if isLCOW {
opts = &uvm.OptionsLCOW{}
} else {
opts = &uvm.OptionsWCOW{}
}
err = json.Unmarshal(optsj, opts)
if err != nil {
return err
}
// Listen on the named pipe associated with this VM.
l, err := winio.ListenPipe(pipePath, &winio.PipeConfig{MessageMode: true})
if err != nil {
return err
}
var vm *uvm.UtilityVM
if isLCOW {
vm, err = uvm.CreateLCOW(opts.(*uvm.OptionsLCOW))
} else {
vm, err = uvm.CreateWCOW(opts.(*uvm.OptionsWCOW))
}
if err != nil {
return err
}
defer vm.Close()
if err = vm.Start(); err != nil {
return err
}
// Asynchronously wait for the VM to exit.
exitCh := make(chan error)
go func() {
exitCh <- vm.Wait()
}()
defer vm.Terminate()
// Alert the parent process that initialization has completed
// successfully.
os.Stdout.Write(runhcs.ShimSuccess)
os.Stdout.Close()
fatalWriter.Writer = ioutil.Discard
pipeCh := make(chan net.Conn)
go func() {
for {
conn, err := l.Accept()
if err != nil {
logrus.Error(err)
continue
}
pipeCh <- conn
}
}()
for {
select {
case <-exitCh:
return nil
case pipe := <-pipeCh:
err = processRequest(vm, pipe)
if err == nil {
_, err = pipe.Write(runhcs.ShimSuccess)
// Wait until the pipe is closed before closing the
// container so that it is properly handed off to the other
// process.
if err == nil {
err = closeWritePipe(pipe)
}
if err == nil {
ioutil.ReadAll(pipe)
}
} else {
logrus.WithError(err).
Error("failed creating container in VM")
fmt.Fprintf(pipe, "%v", err)
}
pipe.Close()
}
}
},
}
func processRequest(vm *uvm.UtilityVM, pipe net.Conn) error {
var req runhcs.VMRequest
err := json.NewDecoder(pipe).Decode(&req)
if err != nil {
return err
}
logrus.WithFields(logrus.Fields{
logfields.ContainerID: req.ID,
logfields.VMShimOperation: req.Op,
}).Debug("process request")
c, err := getContainer(req.ID, false)
if err != nil {
return err
}
defer func() {
if c != nil {
c.Close()
}
}()
switch req.Op {
case runhcs.OpCreateContainer:
err = createContainerInHost(c, vm)
if err != nil {
return err
}
c2 := c
c = nil
go func() {
c2.hc.Wait()
c2.Close()
}()
case runhcs.OpUnmountContainer, runhcs.OpUnmountContainerDiskOnly:
err = c.unmountInHost(vm, req.Op == runhcs.OpUnmountContainer)
if err != nil {
return err
}
case runhcs.OpSyncNamespace:
return errors.New("Not implemented")
default:
panic("unknown operation")
}
return nil
}
type noVMError struct {
ID string
}
func (err *noVMError) Error() string {
return "VM " + err.ID + " cannot be contacted"
}
func (c *container) issueVMRequest(op runhcs.VMRequestOp) error {
req := runhcs.VMRequest{
ID: c.ID,
Op: op,
}
if err := runhcs.IssueVMRequest(c.VMPipePath(), &req); err != nil {
if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND {
return &noVMError{c.HostID}
}
return err
}
return nil
}


@@ -0,0 +1,64 @@
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/Microsoft/hcsshim/ext4/tar2ext4"
)
var (
input = flag.String("i", "", "input file")
output = flag.String("o", "", "output file")
overlay = flag.Bool("overlay", false, "produce overlayfs-compatible layer image")
vhd = flag.Bool("vhd", false, "add a VHD footer to the end of the image")
inlineData = flag.Bool("inline", false, "write small file data into the inode; not compatible with DAX")
)
func main() {
flag.Parse()
if flag.NArg() != 0 || len(*output) == 0 {
flag.Usage()
os.Exit(1)
}
err := func() (err error) {
in := os.Stdin
if *input != "" {
in, err = os.Open(*input)
if err != nil {
return err
}
}
out, err := os.Create(*output)
if err != nil {
return err
}
var opts []tar2ext4.Option
if *overlay {
opts = append(opts, tar2ext4.ConvertWhiteout)
}
if *vhd {
opts = append(opts, tar2ext4.AppendVhdFooter)
}
if *inlineData {
opts = append(opts, tar2ext4.InlineData)
}
err = tar2ext4.Convert(in, out, opts...)
if err != nil {
return err
}
// Exhaust the tar stream.
io.Copy(ioutil.Discard, in)
return nil
}()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,36 @@
package main
import (
"path/filepath"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
var createCommand = cli.Command{
Name: "create",
Usage: "creates a new writable container layer",
Flags: []cli.Flag{
cli.StringSliceFlag{
Name: "layer, l",
Usage: "paths to the read-only parent layers",
},
},
ArgsUsage: "<layer path>",
Before: appargs.Validate(appargs.NonEmptyString),
Action: func(context *cli.Context) error {
path, err := filepath.Abs(context.Args().First())
if err != nil {
return err
}
layers, err := normalizeLayers(context.StringSlice("layer"), true)
if err != nil {
return err
}
di := driverInfo
return hcsshim.CreateScratchLayer(di, path, layers[len(layers)-1], layers)
},
}


@@ -0,0 +1,66 @@
package main
import (
"compress/gzip"
"io"
"os"
"path/filepath"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/ociwclayer"
"github.com/urfave/cli"
)
var exportCommand = cli.Command{
Name: "export",
Usage: "exports a layer to a tar file",
Flags: []cli.Flag{
cli.StringSliceFlag{
Name: "layer, l",
Usage: "paths to the read-only parent layers",
},
cli.StringFlag{
Name: "output, o",
Usage: "output layer tar (defaults to stdout)",
},
cli.BoolFlag{
Name: "gzip, z",
Usage: "compress output with gzip compression",
},
},
ArgsUsage: "<layer path>",
Before: appargs.Validate(appargs.NonEmptyString),
Action: func(context *cli.Context) (err error) {
path, err := filepath.Abs(context.Args().First())
if err != nil {
return err
}
layers, err := normalizeLayers(context.StringSlice("layer"), true)
if err != nil {
return err
}
err = winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege})
if err != nil {
return err
}
fp := context.String("output")
f := os.Stdout
if fp != "" {
f, err = os.Create(fp)
if err != nil {
return err
}
defer f.Close()
}
w := io.Writer(f)
if context.Bool("gzip") {
w = gzip.NewWriter(w)
}
return ociwclayer.ExportLayer(w, path, layers)
},
}


@@ -0,0 +1,74 @@
package main
import (
"bufio"
"compress/gzip"
"io"
"os"
"path/filepath"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/ociwclayer"
"github.com/urfave/cli"
)
var importCommand = cli.Command{
Name: "import",
Usage: "imports a layer from a tar file",
Flags: []cli.Flag{
cli.StringSliceFlag{
Name: "layer, l",
Usage: "paths to the read-only parent layers",
},
cli.StringFlag{
Name: "input, i",
Usage: "input layer tar (defaults to stdin)",
},
},
ArgsUsage: "<layer path>",
Before: appargs.Validate(appargs.NonEmptyString),
Action: func(context *cli.Context) (err error) {
path, err := filepath.Abs(context.Args().First())
if err != nil {
return err
}
layers, err := normalizeLayers(context.StringSlice("layer"), false)
if err != nil {
return err
}
fp := context.String("input")
f := os.Stdin
if fp != "" {
f, err = os.Open(fp)
if err != nil {
return err
}
defer f.Close()
}
r, err := addDecompressor(f)
if err != nil {
return err
}
err = winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
if err != nil {
return err
}
_, err = ociwclayer.ImportLayer(r, path, layers)
return err
},
}
func addDecompressor(r io.Reader) (io.Reader, error) {
b := bufio.NewReader(r)
hdr, err := b.Peek(3)
if err != nil {
return nil, err
}
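// A gzip stream starts with the magic bytes 0x1f 0x8b followed by the
// compression method byte (8 = deflate).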
if hdr[0] == 0x1f && hdr[1] == 0x8b && hdr[2] == 8 {
return gzip.NewReader(b)
}
return b, nil
}


@@ -0,0 +1,88 @@
package main
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
var mountCommand = cli.Command{
Name: "mount",
Usage: "mounts a scratch",
ArgsUsage: "<scratch path>",
Flags: []cli.Flag{
cli.StringSliceFlag{
Name: "layer, l",
Usage: "paths to the parent layers for this layer",
},
},
Action: func(context *cli.Context) (err error) {
if context.NArg() != 1 {
return errors.New("invalid usage")
}
path, err := filepath.Abs(context.Args().First())
if err != nil {
return err
}
layers, err := normalizeLayers(context.StringSlice("layer"), true)
if err != nil {
return err
}
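// Mounting is a three-step sequence: activate the scratch layer, prepare it
// with its parent layers, then query the resulting mount path; the deferred
// calls below undo each step if a later one fails.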
err = hcsshim.ActivateLayer(driverInfo, path)
if err != nil {
return err
}
defer func() {
if err != nil {
hcsshim.DeactivateLayer(driverInfo, path)
}
}()
err = hcsshim.PrepareLayer(driverInfo, path, layers)
if err != nil {
return err
}
defer func() {
if err != nil {
hcsshim.UnprepareLayer(driverInfo, path)
}
}()
mountPath, err := hcsshim.GetLayerMountPath(driverInfo, path)
if err != nil {
return err
}
_, err = fmt.Println(mountPath)
return err
},
}
var unmountCommand = cli.Command{
Name: "unmount",
Usage: "unmounts a scratch",
ArgsUsage: "<layer path>",
Before: appargs.Validate(appargs.NonEmptyString),
Action: func(context *cli.Context) (err error) {
path, err := filepath.Abs(context.Args().First())
if err != nil {
return err
}
err = hcsshim.UnprepareLayer(driverInfo, path)
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
err = hcsshim.DeactivateLayer(driverInfo, path)
if err != nil {
return err
}
return nil
},
}


@@ -0,0 +1,31 @@
package main
import (
"path/filepath"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
var removeCommand = cli.Command{
Name: "remove",
Usage: "permanently removes a layer directory in its entirety",
ArgsUsage: "<layer path>",
Before: appargs.Validate(appargs.NonEmptyString),
Action: func(context *cli.Context) (err error) {
path, err := filepath.Abs(context.Args().First())
if err != nil {
return err
}
err = winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
if err != nil {
return err
}
return hcsshim.DestroyLayer(driverInfo, path)
},
}


@@ -0,0 +1,43 @@
{
"FixedFileInfo": {
"FileVersion": {
"Major": 1,
"Minor": 0,
"Patch": 0,
"Build": 0
},
"ProductVersion": {
"Major": 1,
"Minor": 0,
"Patch": 0,
"Build": 0
},
"FileFlagsMask": "3f",
"FileFlags ": "00",
"FileOS": "040004",
"FileType": "01",
"FileSubType": "00"
},
"StringFileInfo": {
"Comments": "",
"CompanyName": "",
"FileDescription": "",
"FileVersion": "",
"InternalName": "",
"LegalCopyright": "",
"LegalTrademarks": "",
"OriginalFilename": "",
"PrivateBuild": "",
"ProductName": "",
"ProductVersion": "v1.0.0.0",
"SpecialBuild": ""
},
"VarFileInfo": {
"Translation": {
"LangID": "0409",
"CharsetID": "04B0"
}
},
"IconPath": "",
"ManifestPath": "wclayer.exe.manifest"
}


@@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<description>wclayer</description>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
</assembly>

Some files were not shown because too many files have changed in this diff.